instance_id
stringlengths 12
57
| base_commit
stringlengths 40
40
| created_at
stringdate 2015-01-06 14:05:07
2025-04-29 17:56:51
| environment_setup_commit
stringlengths 40
40
| hints_text
stringlengths 0
158k
| patch
stringlengths 261
20.8k
| problem_statement
stringlengths 11
52.5k
| repo
stringlengths 7
53
| test_patch
stringlengths 280
206k
| meta
dict | version
stringclasses 463
values | install_config
dict | requirements
stringlengths 93
34k
⌀ | environment
stringlengths 772
20k
⌀ | FAIL_TO_PASS
sequencelengths 1
856
| FAIL_TO_FAIL
sequencelengths 0
536
| PASS_TO_PASS
sequencelengths 0
7.87k
| PASS_TO_FAIL
sequencelengths 0
92
| license_name
stringclasses 35
values | __index_level_0__
int64 11
21.4k
| num_tokens_patch
int64 103
4.99k
| before_filepaths
sequencelengths 0
14
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tobymao__sqlglot-4263 | 7fc0055fb04713ba047baa5eda1ce0baf1cc79e2 | 2024-10-17 21:52:50 | 4543fb3cd052dfb20428f5a6254b38def9e756ee | diff --git a/sqlglot/optimizer/scope.py b/sqlglot/optimizer/scope.py
index 0bd7ab26..6612f610 100644
--- a/sqlglot/optimizer/scope.py
+++ b/sqlglot/optimizer/scope.py
@@ -679,6 +679,8 @@ def _traverse_tables(scope):
expressions.extend(scope.expression.args.get("laterals") or [])
for expression in expressions:
+ if isinstance(expression, exp.Final):
+ expression = expression.this
if isinstance(expression, exp.Table):
table_name = expression.name
source_name = expression.alias_or_name
| Empty 'sources' in scope when using `FINAL` modifier in ClickHouse SQL query
```python
import sqlglot
from sqlglot.optimizer import traverse_scope
exp = sqlglot.parse_one('SELECT * FROM x FINAL', read="clickhouse")
scopes = traverse_scope(exp)
print(len(scopes[0].sources))
```
| tobymao/sqlglot | diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 3603603f..56ff06ff 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -2,6 +2,7 @@ from datetime import date
from sqlglot import exp, parse_one
from sqlglot.dialects import ClickHouse
from sqlglot.expressions import convert
+from sqlglot.optimizer import traverse_scope
from tests.dialects.test_dialect import Validator
from sqlglot.errors import ErrorLevel
@@ -1123,3 +1124,9 @@ LIFETIME(MIN 0 MAX 0)""",
self.validate_identity(
"SELECT * FROM arrays_test ARRAY JOIN [1, 2, 3] AS arr_external1, ['a', 'b', 'c'] AS arr_external2, splitByString(',', 'asd,qwerty,zxc') AS arr_external3"
)
+
+ def test_traverse_scope(self):
+ sql = "SELECT * FROM t FINAL"
+ scopes = traverse_scope(parse_one(sql, dialect=self.dialect))
+ self.assertEqual(len(scopes), 1)
+ self.assertEqual(set(scopes[0].sources), {"t"})
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 25.25 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
distlib==0.3.9
duckdb==1.2.1
filelock==3.18.0
identify==2.6.9
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pre_commit==4.2.0
Pygments==2.19.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.4.3
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@7fc0055fb04713ba047baa5eda1ce0baf1cc79e2#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- distlib==0.3.9
- duckdb==1.2.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.4.3
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_traverse_scope"
] | [] | [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_agg_functions",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_array_join",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse_values",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_convert",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_datetime_funcs",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_drop_on_cluster",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_grant",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary"
] | [] | MIT License | 19,975 | 151 | [
"sqlglot/optimizer/scope.py"
] |
|
rthalley__dnspython-1151 | b9e75af9331c908a92c6cc7bad1b4c1630bdb8ee | 2024-10-17 23:14:06 | b9e75af9331c908a92c6cc7bad1b4c1630bdb8ee | schanzen: Oh I see. Yes my analysis of the server response was a bit superficial.
I have another question then regarding this behavior.
As a caller, I kind of need to know if I get an AXFR response and I need to "blank slate" reset the zone information or this is an incremental update that adds/deletes the records. Is there any way to check which kind of transfer is being done?
schanzen: Another note:
if you change the above code to do an AXFR (`serial=None`) I also get an exception:
```
Traceback (most recent call last):
File "/home/schanzen/dev/ascension/test.py", line 7, in <module>
dns.query.inbound_xfr("195.43.86.175", zone, txn)
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/query.py", line 1665, in inbound_xfr
for _ in _inbound_xfr(txn_manager, s, query, serial, timeout, expiration):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/query.py", line 1470, in _inbound_xfr
done = inbound.process_message(r)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/xfr.py", line 207, in process_message
raise dns.exception.FormError("unexpected origin SOA in AXFR")
dns.exception.FormError: unexpected origin SOA in AXFR
``` | diff --git a/dns/xfr.py b/dns/xfr.py
index f1b8759..d17dd48 100644
--- a/dns/xfr.py
+++ b/dns/xfr.py
@@ -83,8 +83,13 @@ class Inbound:
if rdtype == dns.rdatatype.IXFR:
if serial is None:
raise ValueError("a starting serial must be supplied for IXFRs")
- elif is_udp:
- raise ValueError("is_udp specified for AXFR")
+ self.incremental = True
+ elif rdtype == dns.rdatatype.AXFR:
+ if is_udp:
+ raise ValueError("is_udp specified for AXFR")
+ self.incremental = False
+ else:
+ raise ValueError("rdtype is not IXFR or AXFR")
self.serial = serial
self.is_udp = is_udp
(_, _, self.origin) = txn_manager.origin_information()
@@ -103,8 +108,7 @@ class Inbound:
Returns `True` if the transfer is complete, and `False` otherwise.
"""
if self.txn is None:
- replacement = self.rdtype == dns.rdatatype.AXFR
- self.txn = self.txn_manager.writer(replacement)
+ self.txn = self.txn_manager.writer(not self.incremental)
rcode = message.rcode()
if rcode != dns.rcode.NOERROR:
raise TransferError(rcode)
@@ -131,7 +135,7 @@ class Inbound:
raise dns.exception.FormError("first RRset is not an SOA")
answer_index = 1
self.soa_rdataset = rdataset.copy() # pyright: ignore
- if self.rdtype == dns.rdatatype.IXFR:
+ if self.incremental:
assert self.soa_rdataset is not None
soa = cast(dns.rdtypes.ANY.SOA.SOA, self.soa_rdataset[0])
if soa.serial == self.serial:
@@ -168,7 +172,7 @@ class Inbound:
#
# Every time we see an origin SOA delete_mode inverts
#
- if self.rdtype == dns.rdatatype.IXFR:
+ if self.incremental:
self.delete_mode = not self.delete_mode
#
# If this SOA Rdataset is equal to the first we saw
@@ -177,8 +181,7 @@ class Inbound:
# part of the response.
#
if rdataset == self.soa_rdataset and (
- self.rdtype == dns.rdatatype.AXFR
- or (self.rdtype == dns.rdatatype.IXFR and self.delete_mode)
+ (not self.incremental) or self.delete_mode
):
#
# This is the final SOA
@@ -187,7 +190,7 @@ class Inbound:
if self.expecting_SOA:
# We got an empty IXFR sequence!
raise dns.exception.FormError("empty IXFR sequence")
- if self.rdtype == dns.rdatatype.IXFR and self.serial != soa.serial:
+ if self.incremental and self.serial != soa.serial:
raise dns.exception.FormError("unexpected end of IXFR sequence")
self.txn.replace(name, rdataset)
self.txn.commit()
@@ -199,7 +202,7 @@ class Inbound:
#
self.expecting_SOA = False
soa = cast(dns.rdtypes.ANY.SOA.SOA, rdataset[0])
- if self.rdtype == dns.rdatatype.IXFR:
+ if self.incremental:
if self.delete_mode:
# This is the start of an IXFR deletion set
if soa.serial != self.serial:
@@ -220,7 +223,7 @@ class Inbound:
# SOA RR, but saw something else, so this must be an
# AXFR response.
#
- self.rdtype = dns.rdatatype.AXFR
+ self.incremental = False
self.expecting_SOA = False
self.delete_mode = False
self.txn.rollback()
| Issues with IXFR requests against ee.
**Describe the bug**
I am trying to do an IXFR for `ee.` from `zone.internet.ee.`.
This works fine with dig, for example using `dig ixfr=1720121412 ee @zone.internet.ee.`
However, with dnspython, for some reason the first (and crucial) SOA record is missing and then it falls back to AXFR and then fails on some assertion/sanity check:
```
Traceback (most recent call last):
File "/home/schanzen/dev/ascension/test.py", line 7, in <module>
dns.query.inbound_xfr("195.43.86.175", zone, txn)
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/query.py", line 1665, in inbound_xfr
for _ in _inbound_xfr(txn_manager, s, query, serial, timeout, expiration):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/query.py", line 1470, in _inbound_xfr
done = inbound.process_message(r)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/schanzen/dev/ascension/.venv/lib64/python3.12/site-packages/dns/xfr.py", line 112, in process_message
raise dns.exception.FormError("wrong question rdatatype")
dns.exception.FormError: wrong question rdatatype
```
(it sees that the question has type IXFR and expects AXFR now)
**To Reproduce**
Use this script:
```
import dns.xfr
import dns.query
import dns.zone
zone = dns.zone.Zone("ee")
txn, _ = dns.xfr.make_query (zone, serial=1720121412)
dns.query.inbound_xfr("195.43.86.175", zone, txn)
```
The IP is the result of `host zone.internet.ee`. The serial is picked from an old revision. The behaviour may change at some point if the serial is too old (then the server will probably actually fall back to AXFR and the above code will run fine).
**Context (please complete the following information):**
- dnspython version 2.7.0
- Python version 3.12.6
- OS: Fedora Linux
| rthalley/dnspython | diff --git a/tests/test_xfr.py b/tests/test_xfr.py
index 458cdf9..257397c 100644
--- a/tests/test_xfr.py
+++ b/tests/test_xfr.py
@@ -263,6 +263,30 @@ ns2 3600 IN A 10.0.0.2
@ 3600 IN SOA foo bar 1 2 3 4 5
"""
+ixfr_axfr1 = """id 1
+opcode QUERY
+rcode NOERROR
+flags AA
+;QUESTION
+example. IN IXFR
+;ANSWER
+@ 3600 IN SOA foo bar 1 2 3 4 5
+@ 3600 IN NS ns1
+@ 3600 IN NS ns2
+"""
+ixfr_axfr2 = """id 1
+opcode QUERY
+rcode NOERROR
+flags AA
+;QUESTION
+example. IN IXFR
+;ANSWER
+bar.foo 300 IN MX 0 blaz.foo
+ns1 3600 IN A 10.0.0.1
+ns2 3600 IN A 10.0.0.2
+@ 3600 IN SOA foo bar 1 2 3 4 5
+"""
+
def test_basic_axfr():
z = dns.versioned.Zone("example.")
@@ -394,6 +418,19 @@ def test_ixfr_is_axfr():
assert z == ez
+def test_ixfr_is_axfr_two_parts():
+ z = dns.versioned.Zone("example.")
+ m1 = dns.message.from_text(ixfr_axfr1, origin=z.origin, one_rr_per_rrset=True)
+ m2 = dns.message.from_text(ixfr_axfr2, origin=z.origin, one_rr_per_rrset=True)
+ with dns.xfr.Inbound(z, dns.rdatatype.IXFR, serial=0xFFFFFFFF) as xfr:
+ done = xfr.process_message(m1)
+ assert not done
+ done = xfr.process_message(m2)
+ assert done
+ ez = dns.zone.from_text(base, "example.")
+ assert z == ez
+
+
def test_ixfr_requires_serial():
z = dns.zone.from_text(base, "example.", zone_factory=dns.versioned.Zone)
with pytest.raises(ValueError):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiofiles==24.1.0
alabaster==0.7.16
astroid==3.3.9
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
black==25.1.0
blinker==1.9.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
dill==0.3.9
-e git+https://github.com/rthalley/dnspython.git@b9e75af9331c908a92c6cc7bad1b4c1630bdb8ee#egg=dnspython
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
Flask==3.1.0
h11==0.14.0
h2==4.2.0
hpack==4.1.0
Hypercorn==0.17.3
hyperframe==6.1.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
itsdangerous==2.2.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
more-itertools==10.6.0
mypy==1.15.0
mypy-extensions==1.0.0
nh3==0.2.21
outcome==1.3.0.post0
packaging @ file:///croot/packaging_1734472117206/work
pathspec==0.12.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
priority==2.0.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.1
Pygments==2.19.1
pylint==3.3.6
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
Quart==0.20.0
quart-trio==0.12.0
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
sniffio==1.3.1
snowballstemmer==2.2.0
sortedcontainers==2.4.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
taskgroup==0.2.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tomlkit==0.13.2
trio==0.29.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
Werkzeug==3.1.3
wsproto==1.2.0
zipp==3.21.0
| name: dnspython
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiofiles==24.1.0
- alabaster==0.7.16
- astroid==3.3.9
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- black==25.1.0
- blinker==1.9.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- dill==0.3.9
- dnspython==2.8.0.dev0
- docutils==0.21.2
- flake8==7.2.0
- flask==3.1.0
- h11==0.14.0
- h2==4.2.0
- hpack==4.1.0
- hypercorn==0.17.3
- hyperframe==6.1.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- isort==6.0.1
- itsdangerous==2.2.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- outcome==1.3.0.post0
- pathspec==0.12.1
- platformdirs==4.3.7
- priority==2.0.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.1
- pygments==2.19.1
- pylint==3.3.6
- pytest-cov==6.0.0
- quart==0.20.0
- quart-trio==0.12.0
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- sniffio==1.3.1
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- taskgroup==0.2.2
- tomlkit==0.13.2
- trio==0.29.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- werkzeug==3.1.3
- wsproto==1.2.0
- zipp==3.21.0
prefix: /opt/conda/envs/dnspython
| [
"tests/test_xfr.py::test_ixfr_is_axfr_two_parts"
] | [] | [
"tests/test_xfr.py::test_basic_axfr",
"tests/test_xfr.py::test_basic_axfr_unversioned",
"tests/test_xfr.py::test_basic_axfr_two_parts",
"tests/test_xfr.py::test_axfr_unexpected_origin",
"tests/test_xfr.py::test_basic_ixfr",
"tests/test_xfr.py::test_basic_ixfr_unversioned",
"tests/test_xfr.py::test_compressed_ixfr",
"tests/test_xfr.py::test_basic_ixfr_many_parts",
"tests/test_xfr.py::test_good_empty_ixfr",
"tests/test_xfr.py::test_retry_tcp_ixfr",
"tests/test_xfr.py::test_bad_empty_ixfr",
"tests/test_xfr.py::test_serial_went_backwards_ixfr",
"tests/test_xfr.py::test_ixfr_is_axfr",
"tests/test_xfr.py::test_ixfr_requires_serial",
"tests/test_xfr.py::test_ixfr_unexpected_end_bad_diff_sequence",
"tests/test_xfr.py::test_udp_ixfr_unexpected_end_just_stops",
"tests/test_xfr.py::test_ixfr_bad_serial",
"tests/test_xfr.py::test_no_udp_with_axfr",
"tests/test_xfr.py::test_refused",
"tests/test_xfr.py::test_bad_qname",
"tests/test_xfr.py::test_bad_qtype",
"tests/test_xfr.py::test_soa_not_first",
"tests/test_xfr.py::test_no_answer",
"tests/test_xfr.py::test_axfr_answers_after_final_soa",
"tests/test_xfr.py::test_make_query_basic",
"tests/test_xfr.py::test_make_query_bad_serial",
"tests/test_xfr.py::test_extract_serial_from_query",
"tests/test_xfr.py::test_sync_inbound_xfr",
"tests/test_xfr.py::test_asyncio_inbound_xfr",
"tests/test_xfr.py::test_trio_inbound_xfr",
"tests/test_xfr.py::test_sync_retry_tcp_inbound_xfr",
"tests/test_xfr.py::test_asyncio_retry_tcp_inbound_xfr"
] | [] | ISC License | 19,977 | 938 | [
"dns/xfr.py"
] |
codingjoe__relint-96 | 0146df523e820cba9f11188f2766aba09852c2ca | 2024-10-18 10:54:08 | 0146df523e820cba9f11188f2766aba09852c2ca | diff --git a/relint/__main__.py b/relint/__main__.py
index afb162f..ed831d1 100644
--- a/relint/__main__.py
+++ b/relint/__main__.py
@@ -1,4 +1,5 @@
import argparse
+import os
import subprocess # nosec
import sys
import warnings
@@ -6,7 +7,13 @@ import warnings
from rich.progress import track
from relint.config import load_config
-from relint.parse import lint_file, match_with_diff_changes, parse_diff, print_culprits
+from relint.parse import (
+ lint_file,
+ match_with_diff_changes,
+ parse_diff,
+ print_culprits,
+ print_github_actions_output,
+)
def parse_args(args=None):
@@ -90,7 +97,11 @@ def main(args=None):
changed_content = parse_diff(output)
matches = match_with_diff_changes(changed_content, matches)
- exit_code = print_culprits(matches, args)
+ GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true"
+ if GITHUB_ACTIONS:
+ exit_code = print_github_actions_output(matches, args)
+ else:
+ exit_code = print_culprits(matches, args)
exit(exit_code)
diff --git a/relint/parse.py b/relint/parse.py
index 192f3b2..9c44462 100644
--- a/relint/parse.py
+++ b/relint/parse.py
@@ -93,6 +93,23 @@ def split_diff_content_by_filename(output: str) -> {str: str}:
return content_by_filename
+def print_github_actions_output(matches, args):
+ exit_code = 0
+ for filename, test, match, line_number in matches:
+ exit_code = test.error if exit_code == 0 else exit_code
+ start_line_no = match.string[: match.start()].count("\n") + 1
+ end_line_no = match.string[: match.end()].count("\n") + 1
+ col = match.start() - match.string.rfind("\n", 0, match.start())
+ col_end = match.end() - match.string.rfind("\n", 0, match.end())
+
+ print(
+ f"::{'error' if test.error else 'warning'} file={filename},"
+ f"line={start_line_no},endLine={end_line_no},col={col},colEnd={col_end},"
+ f"title={test.name}::{test.hint}".replace("\n", "%0A")
+ )
+ return exit_code
+
+
def print_culprits(matches, args):
exit_code = 0
messages = []
| Add alternative format for GitHub PR annotations
First of all, thank you for the amazing Swiss army knife that is reLint! 🙏 💙
We use it to augment the linting that SQLFluff provides, since it only covers the basics (and not all of our SQL coding standards).
This feature request is basically to be able to easily post reLint results as annotation in GitHub PRs. Basically, [something similar to what SQLFluff is doing](https://docs.sqlfluff.com/en/2.2.0/production.html#using-github-actions-to-annotate-prs): `--format github-annotation-native`.
Or do you think this is something that belongs outside of the reLint codebase? (kinda like [container images](https://github.com/codingjoe/relint/discussions/59)) | codingjoe/relint | diff --git a/tests/conftest.py b/tests/conftest.py
index a833446..52919b6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -9,3 +9,9 @@ FIXTURE_DIR = pathlib.Path(__file__).parent / "fixtures"
def fixture_dir():
"""Return the path to the fixture directory."""
return FIXTURE_DIR
+
+
[email protected](autouse=True)
+def env(monkeypatch):
+ """Remove GITHUB_ACTIONS the environment as it is part of our feature set."""
+ monkeypatch.setenv("GITHUB_ACTIONS", "false")
diff --git a/tests/test_main.py b/tests/test_main.py
index b8cfd80..850aa56 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -44,6 +44,25 @@ class TestMain:
assert "❱ 1 # FIXME do something" in out
assert exc_info.value.code == 1
+ def test_main_execution_with_error__github_workflow_output(
+ self, monkeypatch, capsys, tmpdir, fixture_dir
+ ):
+ monkeypatch.setenv("GITHUB_ACTIONS", "true")
+ with (fixture_dir / ".relint.yml").open() as fs:
+ config = fs.read()
+ tmpdir.join(".relint.yml").write(config)
+ tmpdir.join("dummy.py").write("# FIXME do something")
+ with tmpdir.as_cwd():
+ with pytest.raises(SystemExit) as exc_info:
+ main(["dummy.py"])
+
+ out, _ = capsys.readouterr()
+ assert (
+ "::error file=dummy.py,line=1,endLine=1,col=3,colEnd=8,title=No fixme (warning)::### This is a multiline hint%0AFix it right away!%0A%0AYou can use code blocks too, like Python:%0A%0A"
+ in out
+ )
+ assert exc_info.value.code == 1
+
@pytest.mark.parametrize("args", [[], ["--summarize"]])
def test_main_execution_without_hint(self, args, capsys, tmpdir, fixture_dir):
with (fixture_dir / ".relint.yml").open() as fs:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
markdown-it-py==3.0.0
mdurl==0.1.2
packaging==24.2
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
-e git+https://github.com/codingjoe/relint.git@0146df523e820cba9f11188f2766aba09852c2ca#egg=relint
rich==14.0.0
tomli==2.2.1
typing_extensions==4.13.0
| name: relint
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- markdown-it-py==3.0.0
- mdurl==0.1.2
- packaging==24.2
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- relint==3.2.1.dev3+g0146df5
- rich==14.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/relint
| [
"tests/test_main.py::TestMain::test_main_execution_with_error__github_workflow_output"
] | [] | [
"tests/test_main.py::test_version",
"tests/test_main.py::TestMain::test_main_execution",
"tests/test_main.py::TestMain::test_main_execution_with_error",
"tests/test_main.py::TestMain::test_main_execution_without_hint[args0]",
"tests/test_main.py::TestMain::test_main_execution_without_hint[args1]",
"tests/test_main.py::TestMain::test_raise_for_warnings",
"tests/test_main.py::TestMain::test_ignore_warnings",
"tests/test_main.py::TestMain::test_summarize",
"tests/test_main.py::TestMain::test_code_padding_disabled"
] | [] | MIT License | 19,980 | 644 | [
"relint/__main__.py",
"relint/parse.py"
] |
|
zarr-developers__zarr-python-2397 | e35c4fcfcf91e2f693dd789379657eacddc4332d | 2024-10-18 12:58:42 | e602aa1d19f26bb06669994231e524c55bcecbeb | d-v-b: > This PR fixes a pretty major bug in the read code path of the sharding codec. Instead of issuing byte ranges in the form of (byte_start_offset, byte_length), the codec issues (byte_start_offset, byte_end_offset).
>
> Fixes #2302
Thanks for this fix. I'm guessing the bug slipped by because when the offset is 0, the `(start, start + length)` and `(start, end)` are identical. I don't remember offhand which convention for linear indexing we are using in the rest of the codebase, but at some point we should ensure that it's all consistent. But that's out of scope for this PR.
@TomAugspurger since you are interested in `NewType` usage: using `NewType` to brand ints as `IntervalStart`, `IntervalEnd`, `IntervalLength` could be a fun use of `NewType`.
normanrz: > I don't remember offhand which convention for linear indexing we are using in the rest of the codebase, but at some point we should ensure that it's all consistent. But that's out of scope for this PR.
The store uses `(start, length)`. Python slices uses `(start, end)`. HTTP uses `(start, end-1)`.
I now refactored the sharding codec to use `(start, length)` because that is what the store expects.
Would be good to get this fix out quickly!
jhamman: We have a small merge conflict to solve but if that is good, I can include this in the beta.1 release later today. | diff --git a/src/zarr/codecs/sharding.py b/src/zarr/codecs/sharding.py
index 2181e9eb..d01e116f 100644
--- a/src/zarr/codecs/sharding.py
+++ b/src/zarr/codecs/sharding.py
@@ -129,7 +129,7 @@ class _ShardIndex(NamedTuple):
if (chunk_start, chunk_len) == (MAX_UINT_64, MAX_UINT_64):
return None
else:
- return (int(chunk_start), int(chunk_start) + int(chunk_len))
+ return (int(chunk_start), int(chunk_len))
def set_chunk_slice(self, chunk_coords: ChunkCoords, chunk_slice: slice | None) -> None:
localized_chunk = self._localize_chunk(chunk_coords)
@@ -203,7 +203,7 @@ class _ShardReader(ShardMapping):
def __getitem__(self, chunk_coords: ChunkCoords) -> Buffer:
chunk_byte_slice = self.index.get_chunk_slice(chunk_coords)
if chunk_byte_slice:
- return self.buf[chunk_byte_slice[0] : chunk_byte_slice[1]]
+ return self.buf[chunk_byte_slice[0] : (chunk_byte_slice[0] + chunk_byte_slice[1])]
raise KeyError
def __len__(self) -> int:
| [v3] `zarr-python` fails to decode sharded array written by other implementations.
### Zarr version
3.0.0a5
### Numcodecs version
0.13.0
### Python Version
3.10.14
### Operating System
Linux
### Installation
pip install zarr==3.0.0a5
### Description
It appears as though the implementation for partial decoding chunks written using the sharding indexed codec is buggy. As in, the implementation fails to read a sharding-encoded array created by another implementation. The same dataset can be read without problem by the [`zarrita`](https://github.com/scalableminds/zarrita) project.
### Steps to reproduce
[shardingdata.zip](https://github.com/user-attachments/files/17300875/shardingdata.zip)
I have attached a zip file containing the filesystem store `testdata.zarr` used to reproduce the bug.
```python
import numpy
import zarr
store = zarr.store.LocalStore("./testdata.zarr", mode="r")
a = zarr.Array.open(store.root / "some/group/another") # reads the sharded array with path /some/group/another.
a[:20, 10:11, :].shape # this fails with a long stacktrace.
# ValueError: When changing to a larger dtype, its size must be a divisor of the total size in bytes of the last axis of the array.
a2 = zarr.Array.open(store.root / "some/group/name") # reads a non-sharded array from same store with path /some/group/name.
a2[:20, 10:11, :].shape # this works
```
If I use `zarrita` to read the problematic array, I get no errors:
```python
import zarrita
store2 = zarrita.LocalStore('./testdata.zarr')
a3 = zarrita.Array.open(store2 / "some/group/another")
a3[:20, 10:11, :].shape # decodes just fine and prints (20, 1, 50)
```
### Additional output
Here is the metadata of the "problematic" array:
```json
{
"zarr_format":3,
"shape":[100,100,50],
"node_type":"array",
"data_type":"float64",
"codecs":
[
{"name":"sharding_indexed",
"configuration":
{"chunk_shape":[5,3,5],
"index_location":"start",
"index_codecs":[{"name":"bytes","configuration":{"endian":"big"}},{"name":"crc32c"}],
"codecs":[{"name":"transpose","configuration":{"order":[2,0,1]}},
{"name":"bytes","configuration":{"endian":"little"}}]}}],
"fill_value":"-Infinity",
"chunk_grid":{"name":"regular","configuration":{"chunk_shape":[10,15,20]}},
"chunk_key_encoding":{"name":"default","configuration":{"separator":"/"}}}
```
Here is the stacktrace of the exception thrown by `zarr-python` when reading the sharded array:
<details>
<summary>stacktrace</summary>
```python
ValueError Traceback (most recent call last)
Cell In[30], line 1
----> 1 a[:20, 10:11, :].shape
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/array.py:919,
in Array.__getitem__(self, selection)
917 return self.vindex[cast(CoordinateSelection | MaskSelection, selection)]
918 elif is_pure_orthogonal_indexing(pure_selection, self.ndim):
--> 919 return self.get_orthogonal_selection(pure_selection, fields=fields)
920 else:
921 return self.get_basic_selection(cast(BasicSelection, pure_selection), fi
elds=fields)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/_compat.py:43, in _
deprecate_positional_args.<locals>._inner_deprecate_positional_args.<locals>.inner_f
(*args, **kwargs)
41 extra_args = len(args) - len(all_args)
42 if extra_args <= 0:
---> 43 return f(*args, **kwargs)
45 # extra_args > 0
46 args_msg = [
47 f"{name}={arg}"
48 for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:], stric
t=False)
49 ]
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/array.py:1361,
in Array.get_orthogonal_selection(self, selection, out, fields, prototype)
1359 prototype = default_buffer_prototype()
1360 indexer = OrthogonalIndexer(selection, self.shape, self.metadata.chunk_grid)
-> 1361 return sync(
1362 self._async_array._get_selection(
1363 indexer=indexer, out=out, fields=fields, prototype=prototype
1364 )
1365 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/sync.py:91, in
sync(coro, loop, timeout)
88 return_result = next(iter(finished)).result()
90 if isinstance(return_result, BaseException):
---> 91 raise return_result
92 else:
93 return return_result
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/sync.py:50, in
_runner(coro)
45 """
46 Await a coroutine and return the result of running it. If awaiting the corou
tine raises an
47 exception, the exception will be returned.
48 """
49 try:
---> 50 return await coro
51 except Exception as ex:
52 return ex
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/array.py:476,
in AsyncArray._get_selection(self, indexer, prototype, out, fields)
468 out_buffer = prototype.nd_buffer.create(
469 shape=indexer.shape,
470 dtype=out_dtype,
471 order=self.order,
472 fill_value=self.metadata.fill_value,
473 )
474 if product(indexer.shape) > 0:
475 # reading chunks and decoding them
--> 476 await self.codec_pipeline.read(
477 [
478 (
479 self.store_path / self.metadata.encode_chunk_key(chunk_coord
s),
480 self.metadata.get_chunk_spec(chunk_coords, self.order, proto
type=prototype),
481 chunk_selection,
482 out_selection,
483 )
484 for chunk_coords, chunk_selection, out_selection in indexer
485 ],
486 out_buffer,
487 drop_axes=indexer.drop_axes,
488 )
489 return out_buffer.as_ndarray_like()
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
427, in BatchedCodecPipeline.read(self, batch_info, out, drop_axes)
421 async def read(
422 self,
423 batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, Selecto
rTuple]],
424 out: NDBuffer,
425 drop_axes: tuple[int, ...] = (),
426 ) -> None:
--> 427 await concurrent_map(
428 [
429 (single_batch_info, out, drop_axes)
430 for single_batch_info in batched(batch_info, self.batch_size)
431 ],
432 self.read_batch,
433 config.get("async.concurrency"),
434 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/common.py:53,
in concurrent_map(items, func, limit)
49 async def concurrent_map(
50 items: list[T], func: Callable[..., Awaitable[V]], limit: int | None = N
one
51 ) -> list[V]:
52 if limit is None:
---> 53 return await asyncio.gather(*[func(*item) for item in items])
55 else:
56 sem = asyncio.Semaphore(limit)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
238, in BatchedCodecPipeline.read_batch(self, batch_info, out, drop_axes)
231 async def read_batch(
232 self,
233 batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, Selecto
rTuple]],
234 out: NDBuffer,
235 drop_axes: tuple[int, ...] = (),
236 ) -> None:
237 if self.supports_partial_decode:
--> 238 chunk_array_batch = await self.decode_partial_batch(
239 [
240 (byte_getter, chunk_selection, chunk_spec)
241 for byte_getter, chunk_spec, chunk_selection, _ in batch_inf
o
242 ]
243 )
244 for chunk_array, (_, chunk_spec, _, out_selection) in zip(
245 chunk_array_batch, batch_info, strict=False
246 ):
247 if chunk_array is not None:
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
194, in BatchedCodecPipeline.decode_partial_batch(self, batch_info)
192 assert self.supports_partial_decode
193 assert isinstance(self.array_bytes_codec, ArrayBytesCodecPartialDecodeMixin)
--> 194 return await self.array_bytes_codec.decode_partial(batch_info)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/abc/codec.py:200, i
n ArrayBytesCodecPartialDecodeMixin.decode_partial(self, batch_info)
180 async def decode_partial(
181 self,
182 batch_info: Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]],
183 ) -> Iterable[NDBuffer | None]:
184 """Partially decodes a batch of chunks.
185 This method determines parts of a chunk from the slice selection,
186 fetches these parts from the store (via ByteGetter) and decodes them.
(...)
198 Iterable[NDBuffer | None]
199 """
--> 200 return await concurrent_map(
201 list(batch_info),
202 self._decode_partial_single,
203 config.get("async.concurrency"),
204 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/common.py:53,
in concurrent_map(items, func, limit)
49 async def concurrent_map(
50 items: list[T], func: Callable[..., Awaitable[V]], limit: int | None = N
one
51 ) -> list[V]:
52 if limit is None:
---> 53 return await asyncio.gather(*[func(*item) for item in items])
55 else:
56 sem = asyncio.Semaphore(limit)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/sharding.py:
510, in ShardingCodec._decode_partial_single(self, byte_getter, selection, shard_spe
c)
507 shard_dict[chunk_coords] = chunk_bytes
509 # decoding chunks and writing them into the output buffer
--> 510 await self.codec_pipeline.read(
511 [
512 (
513 _ShardingByteGetter(shard_dict, chunk_coords),
514 chunk_spec,
515 chunk_selection,
516 out_selection,
517 )
518 for chunk_coords, chunk_selection, out_selection in indexer
519 ],
520 out,
521 )
522 return out
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
427, in BatchedCodecPipeline.read(self, batch_info, out, drop_axes)
421 async def read(
422 self,
423 batch_info: Iterable[tuple[ByteGetter, ArraySpec, SelectorTuple, Selecto
rTuple]],
424 out: NDBuffer,
425 drop_axes: tuple[int, ...] = (),
426 ) -> None:
--> 427 await concurrent_map(
428 [
429 (single_batch_info, out, drop_axes)
430 for single_batch_info in batched(batch_info, self.batch_size)
431 ],
432 self.read_batch,
433 config.get("async.concurrency"),
434 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/common.py:53,
in concurrent_map(items, func, limit)
49 async def concurrent_map(
50 items: list[T], func: Callable[..., Awaitable[V]], limit: int | None = N
one
51 ) -> list[V]:
52 if limit is None:
---> 53 return await asyncio.gather(*[func(*item) for item in items])
55 else:
56 sem = asyncio.Semaphore(limit)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
260, in BatchedCodecPipeline.read_batch(self, batch_info, out, drop_axes)
251 else:
252 chunk_bytes_batch = await concurrent_map(
253 [
254 (byte_getter, array_spec.prototype)
(...)
258 config.get("async.concurrency"),
259 )
--> 260 chunk_array_batch = await self.decode_batch(
261 [
262 (chunk_bytes, chunk_spec)
263 for chunk_bytes, (_, chunk_spec, _, _) in zip(
264 chunk_bytes_batch, batch_info, strict=False
265 )
266 ],
267 )
268 for chunk_array, (_, chunk_spec, chunk_selection, out_selection) in zip(
269 chunk_array_batch, batch_info, strict=False
270 ):
271 if chunk_array is not None:
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/pipeline.py:
177, in BatchedCodecPipeline.decode_batch(self, chunk_bytes_and_specs)
172 chunk_bytes_batch = await bb_codec.decode(
173 zip(chunk_bytes_batch, chunk_spec_batch, strict=False)
174 )
176 ab_codec, chunk_spec_batch = ab_codec_with_spec
--> 177 chunk_array_batch = await ab_codec.decode(
178 zip(chunk_bytes_batch, chunk_spec_batch, strict=False)
179 )
181 for aa_codec, chunk_spec_batch in aa_codecs_with_spec[::-1]:
182 chunk_array_batch = await aa_codec.decode(
183 zip(chunk_array_batch, chunk_spec_batch, strict=False)
184 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/abc/codec.py:125, i
n _Codec.decode(self, chunks_and_specs)
109 async def decode(
110 self,
111 chunks_and_specs: Iterable[tuple[CodecOutput | None, ArraySpec]],
112 ) -> Iterable[CodecInput | None]:
113 """Decodes a batch of chunks.
114 Chunks can be None in which case they are ignored by the codec.
115
(...)
123 Iterable[CodecInput | None]
124 """
--> 125 return await _batching_helper(self._decode_single, chunks_and_specs)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/abc/codec.py:409, i
n _batching_helper(func, batch_info)
405 async def _batching_helper(
406 func: Callable[[CodecInput, ArraySpec], Awaitable[CodecOutput | None]],
407 batch_info: Iterable[tuple[CodecInput | None, ArraySpec]],
408 ) -> list[CodecOutput | None]:
--> 409 return await concurrent_map(
410 list(batch_info),
411 _noop_for_none(func),
412 config.get("async.concurrency"),
413 )
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/core/common.py:53,
in concurrent_map(items, func, limit)
49 async def concurrent_map(
50 items: list[T], func: Callable[..., Awaitable[V]], limit: int | None = N
one
51 ) -> list[V]:
52 if limit is None:
---> 53 return await asyncio.gather(*[func(*item) for item in items])
55 else:
56 sem = asyncio.Semaphore(limit)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/abc/codec.py:422, i
n _noop_for_none.<locals>.wrap(chunk, chunk_spec)
420 if chunk is None:
421 return None
--> 422 return await func(chunk, chunk_spec)
File ~/micromamba/envs/general/lib/python3.10/site-packages/zarr/codecs/bytes.py:89,
in BytesCodec._decode_single(self, chunk_bytes, chunk_spec)
86 else:
87 as_nd_array_like = np.asanyarray(as_array_like)
88 chunk_array = chunk_spec.prototype.nd_buffer.from_ndarray_like(
---> 89 as_nd_array_like.view(dtype=dtype)
90 )
92 # ensure correct chunk shape
93 if chunk_array.shape != chunk_spec.shape:
ValueError: When changing to a larger dtype, its size must be a divisor of the total
size in bytes of the last axis of the array.
```
</details>
### NOTE
It's worth noting that if I add a compression codec to the sharding config's codec pipeline (e.g Gzip with compression level 5); I get a different exception: `ValueError: cannot reshape array of size 225 into shape (5,5,3)` . `zarrita` still reads the array correctly even in this scenario.
### Data generation code
The data inside the attached zip can be generated using the following python code:
<details>
<summary>Python code</summary>
```python
import numpy as np
import zarrita
store = zarrita.LocalStore('testdata.zarr')
a = zarrita.Array.create(
store / "some/group/another",
shape=(100, 100, 50),
dtype='float64',
chunk_shape=(10, 15, 20),
chunk_key_encoding=('default', '/'),
codecs=[
zarrita.codecs.sharding_codec(
chunk_shape=(5,3,5),
codecs=[
zarrita.codecs.transpose_codec(order=(2,0,1)),
zarrita.codecs.bytes_codec(endian="little")
],
index_codecs=[
zarrita.codecs.bytes_codec(endian="big"),
zarrita.codecs.crc32c_codec()
],
index_location="start")
],
fill_value=-np.inf
)
a[:30, 10:12, :] = np.random.rand(30, 2, 50)
```
</details> | zarr-developers/zarr-python | diff --git a/tests/test_codecs/test_sharding.py b/tests/test_codecs/test_sharding.py
index c0dcfbf3..f827a072 100644
--- a/tests/test_codecs/test_sharding.py
+++ b/tests/test_codecs/test_sharding.py
@@ -118,6 +118,42 @@ def test_sharding_partial(
assert np.array_equal(data, read_data)
[email protected]("index_location", ["start", "end"])
[email protected]("store", ["local", "memory", "zip"], indirect=["store"])
[email protected](
+ "array_fixture",
+ [
+ ArrayRequest(shape=(128,) * 3, dtype="uint16", order="F"),
+ ],
+ indirect=["array_fixture"],
+)
+def test_sharding_partial_readwrite(
+ store: Store, array_fixture: npt.NDArray[Any], index_location: ShardingCodecIndexLocation
+) -> None:
+ data = array_fixture
+ spath = StorePath(store)
+ a = Array.create(
+ spath,
+ shape=data.shape,
+ chunk_shape=data.shape,
+ dtype=data.dtype,
+ fill_value=0,
+ codecs=[
+ ShardingCodec(
+ chunk_shape=(1, data.shape[1], data.shape[2]),
+ codecs=[BytesCodec()],
+ index_location=index_location,
+ )
+ ],
+ )
+
+ a[:] = data
+
+ for x in range(data.shape[0]):
+ read_data = a[x, :, :]
+ assert np.array_equal(data[x], read_data)
+
+
@pytest.mark.parametrize(
"array_fixture",
[
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio",
"moto[s3]",
"mypy",
"hypothesis",
"universal-pathlib"
],
"pre_install": [],
"python": "3.11",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asciitree==0.3.3
attrs==25.3.0
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
coverage==7.8.0
crc32c==2.7.1
cryptography==44.0.2
Deprecated==1.2.18
donfig==0.8.1.post1
fsspec==2025.3.1
hypothesis==6.130.5
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
jmespath==1.0.1
MarkupSafe==3.0.2
moto==5.1.2
mypy==1.15.0
mypy-extensions==1.0.0
numcodecs==0.15.1
numpy==2.2.4
packaging==24.2
pluggy==1.5.0
py-partiql-parser==0.6.1
pycparser==2.22
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
s3transfer==0.11.4
six==1.17.0
sortedcontainers==2.4.0
typing_extensions==4.13.0
universal_pathlib==0.2.6
urllib3==2.3.0
Werkzeug==3.1.3
wrapt==1.17.2
xmltodict==0.14.2
-e git+https://github.com/zarr-developers/zarr-python.git@e35c4fcfcf91e2f693dd789379657eacddc4332d#egg=zarr
| name: zarr-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asciitree==0.3.3
- attrs==25.3.0
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- coverage==7.8.0
- crc32c==2.7.1
- cryptography==44.0.2
- deprecated==1.2.18
- donfig==0.8.1.post1
- fsspec==2025.3.1
- hypothesis==6.130.5
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- jmespath==1.0.1
- markupsafe==3.0.2
- moto==5.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- numcodecs==0.15.1
- numpy==2.2.4
- packaging==24.2
- pluggy==1.5.0
- py-partiql-parser==0.6.1
- pycparser==2.22
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- s3transfer==0.11.4
- six==1.17.0
- sortedcontainers==2.4.0
- typing-extensions==4.13.0
- universal-pathlib==0.2.6
- urllib3==2.3.0
- werkzeug==3.1.3
- wrapt==1.17.2
- xmltodict==0.14.2
- zarr==3.0.0b1.dev36+ge35c4fcf
prefix: /opt/conda/envs/zarr-python
| [
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-local-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-local-end]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-memory-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-memory-end]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-zip-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_readwrite[array_fixture0-zip-end]"
] | [] | [
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture0-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture1-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[0-array_fixture2-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture0-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture1-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-start-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-start-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-start-zip]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-end-local]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-end-memory]",
"tests/test_codecs/test_sharding.py::test_sharding[10-array_fixture2-end-zip]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-local-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-local-end]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-memory-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-memory-end]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-zip-start]",
"tests/test_codecs/test_sharding.py::test_sharding_partial[array_fixture0-zip-end]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[local-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[local-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[memory-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[memory-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[zip-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_read[zip-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[local-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[local-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[memory-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[memory-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[zip-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_sharding_partial_overwrite[zip-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[local-start-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[local-start-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[local-end-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[local-end-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[memory-start-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[memory-start-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[memory-end-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[memory-end-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[zip-start-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[zip-start-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[zip-end-start-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_nested_sharding[zip-end-end-array_fixture0]",
"tests/test_codecs/test_sharding.py::test_open_sharding[local]",
"tests/test_codecs/test_sharding.py::test_open_sharding[memory]",
"tests/test_codecs/test_sharding.py::test_open_sharding[zip]",
"tests/test_codecs/test_sharding.py::test_write_partial_sharded_chunks[local]",
"tests/test_codecs/test_sharding.py::test_write_partial_sharded_chunks[memory]",
"tests/test_codecs/test_sharding.py::test_write_partial_sharded_chunks[zip]",
"tests/test_codecs/test_sharding.py::test_delete_empty_shards[local]",
"tests/test_codecs/test_sharding.py::test_delete_empty_shards[memory]",
"tests/test_codecs/test_sharding.py::test_pickle"
] | [] | MIT License | 19,982 | 307 | [
"src/zarr/codecs/sharding.py"
] |
zpieslak__mobilus-client-18 | 702c41f2e88732f7331e7f68e32e667a7415b3c5 | 2024-10-18 15:03:56 | 3dbb9adc147182372819890c60255d7e2682715e | diff --git a/mobilus_client/app.py b/mobilus_client/app.py
index 0eae7e0..210eb08 100644
--- a/mobilus_client/app.py
+++ b/mobilus_client/app.py
@@ -1,4 +1,5 @@
import logging
+import secrets
import socket
from mobilus_client.config import Config
@@ -16,7 +17,7 @@ class App:
self.message_registry = MessageRegistry()
self.key_registry = KeyRegistry(config.user_key)
self.client = MqttClient(
- client_id=config.client_id,
+ client_id=secrets.token_hex(6).upper(),
transport=config.gateway_protocol,
userdata={
"config": config,
diff --git a/mobilus_client/config.py b/mobilus_client/config.py
index 572ca03..1918598 100644
--- a/mobilus_client/config.py
+++ b/mobilus_client/config.py
@@ -1,5 +1,4 @@
-import secrets
-from dataclasses import dataclass, field
+from dataclasses import dataclass
from mobilus_client.utils.encryption import create_key
@@ -11,7 +10,6 @@ class Config:
user_password: str
auth_timeout_period: float = 30
- client_id: str = field(default_factory=lambda: secrets.token_hex(6).upper())
gateway_port: int = 8884
gateway_protocol: str = "websockets"
timeout_period: float = 30
diff --git a/mobilus_client/mqtt_client.py b/mobilus_client/mqtt_client.py
index d935d37..731b62a 100644
--- a/mobilus_client/mqtt_client.py
+++ b/mobilus_client/mqtt_client.py
@@ -17,6 +17,7 @@ logger = logging.getLogger(__name__)
class MqttClient(mqtt.Client):
+ _client_id: bytes
_userdata: dict[str, Any]
def __init__(self, **kwargs: Any) -> None: # noqa: ANN401
@@ -43,7 +44,7 @@ class MqttClient(mqtt.Client):
encrypted_message = MessageEncryptor.encrypt(
cast(MessageRequest, message),
- self._userdata["config"].client_id,
+ self._client_id.decode(),
self._userdata["key_registry"],
)
@@ -52,9 +53,9 @@ class MqttClient(mqtt.Client):
def on_disconnect(self, _client: mqtt.Client, _userdata: dict[str, Any], reason_code: int) -> None: # type: ignore[override]
logger.info("Disconnected with result code - %s", reason_code)
- def on_connect(self, client: mqtt.Client, userdata: dict[str, Any], *_args: Any) -> None: # type: ignore[override] # noqa: ANN401
+ def on_connect(self, client: mqtt.Client, _userdata: dict[str, Any], *_args: Any) -> None: # type: ignore[override] # noqa: ANN401
client.subscribe([
- (userdata["config"].client_id, 0),
+ (self._client_id.decode(), 0),
("clients", 0),
])
| Move client_id generation out of Config class
Generate unique client_id on each mqtt client initialization, instead of doing it in Config class.
This is first step needed to App class to be more flexible and allow multiple mqtt connections. | zpieslak/mobilus-client | diff --git a/tests/test_app.py b/tests/test_app.py
index 36744b1..66e0caa 100644
--- a/tests/test_app.py
+++ b/tests/test_app.py
@@ -15,7 +15,6 @@ from tests.factories import (
class TestApp(unittest.TestCase):
def setUp(self) -> None:
self.config = Config(
- client_id="0123456789ABCDEF",
gateway_host="host",
user_login="login",
user_password="password",
diff --git a/tests/test_config.py b/tests/test_config.py
index 3d6f7a8..6f792b2 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -15,9 +15,6 @@ class TestConfig(unittest.TestCase):
def test_auth_timeout_period(self) -> None:
self.assertEqual(self.config.auth_timeout_period, 30)
- def test_client_id(self) -> None:
- self.assertEqual(len(self.config.client_id), 12)
-
def test_gateway_host(self) -> None:
self.assertEqual(self.config.gateway_host, "host")
diff --git a/tests/test_mqtt_client.py b/tests/test_mqtt_client.py
index 66d06d8..83dbd8d 100644
--- a/tests/test_mqtt_client.py
+++ b/tests/test_mqtt_client.py
@@ -22,8 +22,8 @@ from tests.helpers import encrypt_message
class TestMQTTClient(unittest.TestCase):
def setUp(self) -> None:
+ self.client_id = "0123456789ABCDEF"
self.config = Config(
- client_id="0123456789ABCDEF",
gateway_host="host",
user_login="login",
user_password="password",
@@ -33,7 +33,7 @@ class TestMQTTClient(unittest.TestCase):
self.message_registry = MessageRegistry()
self.key_registry = KeyRegistry(self.config.user_key)
self.client = MqttClient(
- client_id=self.config.client_id,
+ client_id=self.client_id,
transport=self.config.gateway_protocol,
userdata={
"config": self.config,
@@ -147,7 +147,7 @@ class TestMQTTClient(unittest.TestCase):
mock_subscribe.assert_called_once_with(
self.client,
[
- (self.config.client_id, 0),
+ (self.client_id, 0),
("clients", 0),
],
)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 3
} | 0.1 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cffi==1.17.1
coverage==7.8.0
cryptography==43.0.3
exceptiongroup==1.2.2
factory-boy==2.12.0
Faker==37.1.0
iniconfig==2.1.0
-e git+https://github.com/zpieslak/mobilus-client.git@702c41f2e88732f7331e7f68e32e667a7415b3c5#egg=mobilus_client
mypy==1.15.0
mypy-extensions==1.0.0
mypy-protobuf==3.6.0
packaging==24.2
paho-mqtt==1.6.1
pluggy==1.5.0
protobuf==4.25.6
pycparser==2.22
pytest==8.3.5
ruff==0.6.9
tomli==2.2.1
types-paho-mqtt==1.6.0.20240321
types-protobuf==4.25.0.20240417
typing_extensions==4.13.0
tzdata==2025.2
| name: mobilus-client
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- coverage==7.8.0
- cryptography==43.0.3
- exceptiongroup==1.2.2
- factory-boy==2.12.0
- faker==37.1.0
- iniconfig==2.1.0
- mobilus-client==0.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- mypy-protobuf==3.6.0
- packaging==24.2
- paho-mqtt==1.6.1
- pluggy==1.5.0
- protobuf==4.25.6
- pycparser==2.22
- pytest==8.3.5
- ruff==0.6.9
- tomli==2.2.1
- types-paho-mqtt==1.6.0.20240321
- types-protobuf==4.25.0.20240417
- typing-extensions==4.13.0
- tzdata==2025.2
prefix: /opt/conda/envs/mobilus-client
| [
"tests/test_mqtt_client.py::TestMQTTClient::test_on_connect",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_with_call_events_request",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_with_current_state_request",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_with_devices_list_request",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_with_login_request"
] | [] | [
"tests/test_app.py::TestApp::test_call_with_commands",
"tests/test_app.py::TestApp::test_call_with_empty_commands",
"tests/test_app.py::TestApp::test_call_with_invalid_gateway_host",
"tests/test_app.py::TestApp::test_call_with_not_authenticated",
"tests/test_app.py::TestApp::test_call_with_timeout_gateway_host",
"tests/test_app.py::TestApp::test_call_with_wrong_commands",
"tests/test_app.py::TestApp::test_init",
"tests/test_config.py::TestConfig::test_auth_timeout_period",
"tests/test_config.py::TestConfig::test_gateway_host",
"tests/test_config.py::TestConfig::test_gateway_port",
"tests/test_config.py::TestConfig::test_gateway_protocol",
"tests/test_config.py::TestConfig::test_timeout_period",
"tests/test_config.py::TestConfig::test_user_key",
"tests/test_config.py::TestConfig::test_user_login",
"tests/test_config.py::TestConfig::test_user_password",
"tests/test_mqtt_client.py::TestMQTTClient::test_init",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_disconnect",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_message_call_events_request_all_completed",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_message_current_state_response_not_all_completed",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_message_devices_list_response_not_all_completed",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_message_invalid",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_message_login_response",
"tests/test_mqtt_client.py::TestMQTTClient::test_on_subscribe",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_when_not_connected",
"tests/test_mqtt_client.py::TestMQTTClient::test_send_request_with_wrong_command"
] | [] | MIT License | 19,986 | 753 | [
"mobilus_client/app.py",
"mobilus_client/config.py",
"mobilus_client/mqtt_client.py"
] |
|
tcgdex__python-sdk-2 | 4eef4dde0b662dab7278813782b268210c80124b | 2024-10-19 10:13:53 | 4eef4dde0b662dab7278813782b268210c80124b | diff --git a/src/tcgdexsdk/endpoints/Endpoint.py b/src/tcgdexsdk/endpoints/Endpoint.py
index 8cdb2e3..e6fa7ee 100644
--- a/src/tcgdexsdk/endpoints/Endpoint.py
+++ b/src/tcgdexsdk/endpoints/Endpoint.py
@@ -26,12 +26,8 @@ class Endpoint(Generic[Item, ListModel]):
self.endpoint = endpoint
async def get(self, id: str) -> Optional[Item]:
- return fetch(self.tcgdex, f"https://api.tcgdex.net/v2/en/{self.endpoint}/{id}", self.item_model)
+ print(self.tcgdex.language)
+ return fetch(self.tcgdex, f"https://api.tcgdex.net/v2/{self.tcgdex.language}/{self.endpoint}/{id}", self.item_model)
async def list(self, query: Optional[Query] = None) -> List[ListModel]:
- return fetch_list(self.tcgdex, f"https://api.tcgdex.net/v2/en/{self.endpoint}", self.list_model)
-
-# Usage example (you'd replace with actual implementations):
-# endpoint = Endpoint(tcgdex_instance, ItemModel, ListModel, 'cards')
-# item = await endpoint.get('some_id')
-# items_list = await endpoint.list(some_query)
+ return fetch_list(self.tcgdex, f"https://api.tcgdex.net/v2/{self.tcgdex.language}/{self.endpoint}", self.list_model)
| French TCGdex
Hello I tried to setup the TCGdex as followed : tcgdex = TCGdex('fr') or tcgdex = TCGdex('Language.FR')
But everything returned is in english I dont know if thats an issue or just not implemented | tcgdex/python-sdk | diff --git a/tests/.fixtures/test_fr.yaml b/tests/.fixtures/test_fr.yaml
new file mode 100644
index 0000000..7cee3e2
--- /dev/null
+++ b/tests/.fixtures/test_fr.yaml
@@ -0,0 +1,50 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Connection:
+ - close
+ Host:
+ - api.tcgdex.net
+ User-Agent:
+ - '@tcgdex/[email protected]'
+ method: GET
+ uri: https://api.tcgdex.net/v2/fr/cards/swsh3-136
+ response:
+ body:
+ string: "{\"category\":\"Pok\xE9mon\",\"id\":\"swsh3-136\",\"illustrator\":\"tetsuya
+ koizumi\",\"image\":\"https://assets.tcgdex.net/fr/swsh/swsh3/136\",\"localId\":\"136\",\"name\":\"Fouinar\",\"rarity\":\"Peu
+ Commune\",\"set\":{\"cardCount\":{\"official\":189,\"total\":201},\"id\":\"swsh3\",\"logo\":\"https://assets.tcgdex.net/fr/swsh/swsh3/logo\",\"name\":\"T\xE9n\xE8bres
+ Embras\xE9es\",\"symbol\":\"https://assets.tcgdex.net/univ/swsh/swsh3/symbol\"},\"variants\":{\"firstEdition\":false,\"holo\":false,\"normal\":true,\"reverse\":true,\"wPromo\":false},\"dexId\":[162],\"hp\":110,\"types\":[\"Incolore\"],\"evolveFrom\":\"Fouinette\",\"stage\":\"Niveau
+ 1\",\"attacks\":[{\"cost\":[\"Incolore\"],\"name\":\"Mode Cool\",\"effect\":\"Piochez
+ 3 cartes.\"},{\"cost\":[\"Incolore\"],\"name\":\"\xC9clate-Queue\",\"effect\":\"Lancez
+ une pi\xE8ce. Si c'est pile, cette attaque ne fait rien.\",\"damage\":90}],\"weaknesses\":[{\"type\":\"Combat\",\"value\":\"\xD72\"}],\"retreat\":1,\"regulationMark\":\"D\",\"legal\":{\"standard\":false,\"expanded\":true},\"updated\":\"2024-06-18T00:34:39+02:00\"}"
+ headers:
+ Access-Control-Allow-Headers:
+ - DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range
+ Access-Control-Allow-Methods:
+ - GET,POST,OPTIONS
+ Access-Control-Allow-Origin:
+ - '*'
+ Access-Control-Expose-Headers:
+ - Content-Length,Content-Range
+ Alt-Svc:
+ - h3=":443"; ma=2592000
+ Cache-Control:
+ - no-cache, no-store, must-revalidate
+ Connection:
+ - close
+ Content-Length:
+ - '941'
+ Content-Type:
+ - application/json; charset=utf-8
+ Date:
+ - Sat, 19 Oct 2024 10:11:27 GMT
+ Etag:
+ - W/"3ad-GxFMqSJRz7F04mM69E8WZDa57k0"
+ X-Powered-By:
+ - Express
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/tests.py b/tests/tests.py
index a09a829..d91f88d 100644
--- a/tests/tests.py
+++ b/tests/tests.py
@@ -21,6 +21,16 @@ class APITest(unittest.IsolatedAsyncioTestCase):
def setUp(self):
self.api = TCGdex(Language.EN)
+ @_use_cassette
+ async def test_fr(self):
+ tcg = TCGdex(Language.FR)
+ res = await tcg.card.get('swsh3-136')
+ self.assertEqual(res.name, 'Fouinar')
+ tcg2 = TCGdex('fr')
+ res = await tcg2.card.get('swsh3-136')
+ self.assertEqual(res.name, 'Fouinar')
+
+
@_use_cassette
async def test_card_resume(self):
res = await self.api.card.list()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"vcrpy"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | dacite==1.9.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
multidict==6.2.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
propcache==0.3.1
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
-e git+https://github.com/tcgdex/python-sdk.git@4eef4dde0b662dab7278813782b268210c80124b#egg=tcgdex_sdk
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
urllib3==1.26.20
vcrpy==7.0.0
wrapt==1.17.2
yarl==1.18.3
| name: python-sdk
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- dacite==1.9.2
- idna==3.10
- multidict==6.2.0
- propcache==0.3.1
- pyyaml==6.0.2
- tcgdex-sdk==0.1.0
- typing-extensions==4.13.0
- urllib3==1.26.20
- vcrpy==7.0.0
- wrapt==1.17.2
- yarl==1.18.3
prefix: /opt/conda/envs/python-sdk
| [
"tests/tests.py::APITest::test_fr"
] | [] | [
"tests/tests.py::APITest::test_card",
"tests/tests.py::APITest::test_card_resume",
"tests/tests.py::APITest::test_category_item",
"tests/tests.py::APITest::test_category_list",
"tests/tests.py::APITest::test_dexId_item",
"tests/tests.py::APITest::test_dexId_list",
"tests/tests.py::APITest::test_energyType_item",
"tests/tests.py::APITest::test_energyType_list",
"tests/tests.py::APITest::test_hp_item",
"tests/tests.py::APITest::test_hp_list",
"tests/tests.py::APITest::test_illustrator_item",
"tests/tests.py::APITest::test_illustrator_list",
"tests/tests.py::APITest::test_rarity_item",
"tests/tests.py::APITest::test_rarity_list",
"tests/tests.py::APITest::test_regulationMark_item",
"tests/tests.py::APITest::test_regulationMark_list",
"tests/tests.py::APITest::test_retreat_item",
"tests/tests.py::APITest::test_retreat_list",
"tests/tests.py::APITest::test_serie",
"tests/tests.py::APITest::test_serie_resume",
"tests/tests.py::APITest::test_set",
"tests/tests.py::APITest::test_set_resume",
"tests/tests.py::APITest::test_stage_item",
"tests/tests.py::APITest::test_stage_list",
"tests/tests.py::APITest::test_suffix_item",
"tests/tests.py::APITest::test_suffix_list",
"tests/tests.py::APITest::test_trainerType_item",
"tests/tests.py::APITest::test_trainerType_list",
"tests/tests.py::APITest::test_type_item",
"tests/tests.py::APITest::test_type_list",
"tests/tests.py::APITest::test_variant_item",
"tests/tests.py::APITest::test_variant_list"
] | [] | null | 19,989 | 330 | [
"src/tcgdexsdk/endpoints/Endpoint.py"
] |
|
projectmesa__mesa-2394 | 2dedca4a8fa7d9bd27e0b2942584009443524aee | 2024-10-21 09:45:15 | 134995f45c8ac3c1b4ce12ca05c21d4654962d02 | github-actions[bot]: Performance benchmarks:
| Model | Size | Init time [95% CI] | Run time [95% CI] |
|-----------------|--------|-------------------------|-------------------------|
| BoltzmannWealth | small | 🔵 +0.4% [-1.0%, +2.1%] | 🔵 -0.8% [-1.0%, -0.6%] |
| BoltzmannWealth | large | 🔵 +2.0% [+1.1%, +2.9%] | 🔴 +6.5% [+4.4%, +8.4%] |
| Schelling | small | 🔴 +3.6% [+3.2%, +4.0%] | 🔵 +0.8% [+0.5%, +1.1%] |
| Schelling | large | 🔵 +1.0% [+0.3%, +1.7%] | 🔵 -5.0% [-7.8%, -1.5%] |
| WolfSheep | small | 🔵 -1.4% [-1.7%, -1.1%] | 🔵 -0.8% [-1.1%, -0.6%] |
| WolfSheep | large | 🔵 -2.9% [-4.0%, -2.1%] | 🟢 -6.9% [-8.8%, -5.0%] |
| BoidFlockers | small | 🔵 +0.5% [+0.0%, +1.0%] | 🔵 -0.1% [-0.8%, +0.5%] |
| BoidFlockers | large | 🔵 -0.3% [-1.1%, +0.4%] | 🔵 -1.2% [-1.9%, -0.6%] |
EwoutH: Let's make it explicit what it does and call it `remove_all_agents`.
`remove_agents` we may want to use for something else also, like removing a certain AgentSet or list of Agents, or doing it conditionally, etc.
quaquel: I made the suggested name change.
EwoutH: Thanks!
Do we need to handle Agents being present in Spaces or other structures? (luckily they don't have schedulers anymore.) Or document something on how to handle this? And maybe test?
quaquel: This starts to feel like a round trip....
Tests were already included. I deliberately use agent.remove internally so it works for e.g., CellAgents. This is also stated in #2393. I will not bother with the existing spaces because they are a massive memory leak already and outside scope. The best solution for those is to subclass Agent.remove explicitly in your own custom agent class.
EwoutH: > This starts to feel like a round trip....
I understand the feeling. I think there's a balance here. From my perspective, I can do a full, proper review end of day when I can take the time for it, and bundle anything immeadiatly.
Or I can jot down a quick thought with the risk that I might have another one a bit later. In this case it were two thoughts within half an hour of the PR opening.
We also do have to acknowledge that PR authors and PR reviewers have different roles and thus different responsibilities, but both have an essential role to play. I understand it can feel a bit asymmetrical because you have done most feature work last week, but I know exactly what it feel like. Part of it is avoidable, but part of it is an essential pain of pushing things through at high velocity in a library committed to a long-term stable API.
Adding something to a standardized, user facing API just come with additional effort / costs compared to doing it yourself within the model.
I'm convinced if we keep appreciating both sides of this coin, whichever we occupy in a certain PR, we'll keep improving. I can't promise it will never be without any pain (for example, I do feel #2394), because sometimes a little pain a huge long term benefits. We can't prevent everything, but we can prevent a lot and should strive for that.
So do call things out, but always assume good intentions. If we need to discuss this further (in a call / face to face) let me know.
<hr>
> Tests were already included.
I a bit of robustness would be nice here. Could you add some Agents to a custom AgentSet and Grid, and see if they are properly removed?
> I deliberately use agent.remove internally so it works for e.g., CellAgents.
That's really nice, didn't know that!
> This is also stated in https://github.com/projectmesa/mesa/issues/2393.
In my view, PR descriptions should be readable on their own. We have [templates](https://github.com/projectmesa/mesa/tree/main/.github/PULL_REQUEST_TEMPLATE) for these, let me check if I can add them to a menu, never got to that.
They don't have to be epistels, sometimes a single line per header could be enough.
Personally, I have largely automated this process using LLMs. For example, I find if you add the issue description, the code diff (add .diff to the PR URL (https://github.com/projectmesa/mesa/pull/2394.diff)) and optionally a few other important points/thought, LLMs are amazing in formatting and generating usage examples.
<details><summary>Prompt</summary>
Write a concise PR description using the issue and diff, following the PR template.
Removing all agents in the model
I am currently updating my teaching materials to MESA 3. One of my assignments is an evolutionary version of Axelrod's emergence of collaboration. However, this requires removing the entire population of agents at each tick, and creating the next generation. (I know it can also be implemented differently...). At the moment, there is no convenience method for removing all agents from the model. Since `model.agents` returns a weakref agentset, doing operations on this won't work. It seems we need a model level method like `model.clear_agents()`, which would call `agent.remove` on each agent. It needs to run through `agent.remove` rather than just rebuild the model._agents datastructures to ensure that agents are also removed from the experimental cell spaces.
```
diff --git a/mesa/model.py b/mesa/model.py
index ac59cc1f1f9..ac6d5d3215c 100644
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -276,3 +276,9 @@ def initialize_data_collector(
)
# Collect data for the first time during initialization.
self.datacollector.collect(self)
+
+ def remove_all_agents(self):
+ """Remove all agents from the model."""
+ # we need to wrap keys in a list to avoid a RunTimeError: dictionary changed size during iteration
+ for agent in list(self._agents.keys()):
+ agent.remove()
diff --git a/tests/test_model.py b/tests/test_model.py
index 7c343e46357..ff1fe297b6a 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -93,3 +93,18 @@ class Sheep(Agent):
assert model.agents_by_type[Wolf] == AgentSet([wolf], model)
assert model.agents_by_type[Sheep] == AgentSet([sheep], model)
assert len(model.agents_by_type) == 2
+
+
+def test_agent_remove():
+ """Test removing all agents from the model."""
+
+ class TestAgent(Agent):
+ pass
+
+ model = Model()
+ for _ in range(100):
+ TestAgent(model)
+ assert len(model.agents) == 100
+
+ model.remove_all_agents()
+ assert len(model.agents) == 0
```
PR template
```
## Summary
<!-- Provide a concise summary of the feature and its purpose. -->
## Motive
<!-- Explain the reasoning behind this feature. Include details on the problem it addresses or the enhancement it provides. -->
## Implementation
<!-- Describe how the feature was implemented. Include details on the approach taken, important decisions made, and code changes. -->
## Usage Examples
<!-- Provide code snippets or examples demonstrating how to use the new feature. Highlight key scenarios where this feature will be beneficial.
If you're modifying the visualisation, add before/after screenshots. -->
## Additional Notes
<!-- Add any additional information that may be relevant for the reviewers, such as potential side effects, dependencies, or related work. -->
```
</details>
<details><summary>Response</summary>
Here's a concise PR description following the provided template:
## Summary
Add a `remove_all_agents()` method to the Model class to facilitate removing all agents from a model in a single operation.
## Motive
This feature addresses the need to efficiently remove all agents from a model, particularly useful in scenarios like evolutionary simulations where the entire population needs to be replaced each tick. Currently, there's no convenient way to perform this operation, especially considering the weakref nature of `model.agents`.
## Implementation
- Added `remove_all_agents()` method to the `Model` class in `mesa/model.py`.
- The method iterates through all agents using `self._agents.keys()` wrapped in a list to avoid runtime errors due to dictionary size changes.
- Each agent is removed using the `agent.remove()` method to ensure proper cleanup, including removal from experimental cell spaces.
- Added a unit test in `tests/test_model.py` to verify the functionality of the new method.
## Usage Examples
```python
model = MyModel()
# ... add agents and run simulation ...
model.remove_all_agents() # Removes all agents from the model
assert len(model.agents) == 0
```
## Additional Notes
This method ensures that agents are properly removed using their `remove()` method, maintaining consistency with other agent removal operations in Mesa.
</details>
Of course you have to review and where necessary edit these, and never blindly copy, but I find they are often 90%+ of the case good, and often I only edit a few words or a add a sentence somewhere. And writing them yourself also has some charm, so sometimes I also do that.
Good PR descriptions are helpful for reviewers, but also historical documentation and especially useful in a public-facing library, where users might want to know why a feature is designed like the way it is.
> I will not bother with the existing spaces because they are a massive memory leak already and outside scope.
Okay, in this specific case I can get behind this, but maybe add a line of documentation that Agents are removed from the cell space automatically, but not from the current spaces.
> The best solution for those is to subclass Agent.remove explicitly in your own custom agent class.
I was also thinking in that direction. Would a simple test case that shows this be possible?
quaquel: > A bit of robustness would be nice here. Could you add some Agents to a custom AgentSet and Grid, and see if they are properly removed?
I don't understand what you are proposing. This adds a method to the model class so why test other non-Model class related things? That is more of a form of integration testing. Moreover, mesa.space.Grid and its subclasses won't work because its not covered in Agent.remove (and cannot be covered there because agents don't know about mesa.space classes.
> Okay, in this specific case I can get behind this, but maybe add a line of documentation that Agents are removed from the cell space automatically, but not from the current spaces.
> I was also thinking in that direction. Would a simple test case that shows this be possible?
No. What would make more sense is improving the documentation of `Agent.remove`. If we expand this documentation you introduce potential unwanted dependencies in the docs between the agent class and the model class. Therefore adding tests in test_model for testing `agent.remove` also appears strange to me.
EwoutH: From my view the difference is when removing an Agent in a loop or one by one, you often have code around it to handle other removals, like the space. With `remove_all_agents`, users might think, "oh, that's convenient, a proper build in function!" and then that's not anymore the case.
In my opinion there should be something somewhere for people to find to resolve their error when inevitably they think remove all agents from a model that has a Space. It doesn't need to handle it perfectly all the time be default, but a piece of documentation or testing should be there. That's the cost of doing business in a public API.
Once we have deprecated the spaces formally, this will be a different story. And `remove_all_agents` could also be just a function of a custom user model, since their really isn't that much added value to add it to Mesa if it doesn't handle supported (maintenance or not) edge cases properly.
Of course any maintainers might have different opinions about any of this, and I'm happy to follow a majority.
quaquel: > From my view the difference is when removing an Agent in a loop or one by one, you often have code around it to handle other removals, like the space. With remove_all_agents, users might think, "oh, that's convenient, a proper build in function!" and then that's not anymore the case.
Fair enough. I have added something on this in `Agent.remove` _and_ `Model.remove_all_agents`. | diff --git a/mesa/agent.py b/mesa/agent.py
index 18454164..200dab6f 100644
--- a/mesa/agent.py
+++ b/mesa/agent.py
@@ -67,7 +67,13 @@ class Agent:
self.model.register_agent(self)
def remove(self) -> None:
- """Remove and delete the agent from the model."""
+ """Remove and delete the agent from the model.
+
+ Notes:
+ If you need to do additional cleanup when removing an agent by for example removing
+ it from a space, consider extending this method in your own agent class.
+
+ """
with contextlib.suppress(KeyError):
self.model.deregister_agent(self)
diff --git a/mesa/model.py b/mesa/model.py
index ac59cc1f..f9c0e2e4 100644
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -276,3 +276,16 @@ class Model:
)
# Collect data for the first time during initialization.
self.datacollector.collect(self)
+
+ def remove_all_agents(self):
+ """Remove all agents from the model.
+
+ Notes:
+ This method calls agent.remove for all agents in the model. If you need to remove agents from
+ e.g., a SingleGrid, you can either explicitly implement your own agent.remove method or clean this up
+ near where you are calling this method.
+
+ """
+ # we need to wrap keys in a list to avoid a RunTimeError: dictionary changed size during iteration
+ for agent in list(self._agents.keys()):
+ agent.remove()
| Removing all agents in the model
I am currently updating my teaching materials to MESA 3. One of my assignments is an evolutionary version of Axelrod's emergence of collaboration. However, this requires removing the entire population of agents at each tick, and creating the next generation. (I know it can also be implemented differently...). At the moment, there is no convenience method for removing all agents from the model. Since `model.agents` returns a weakref agentset, doing operations on this won't work. It seems we need a model level method like `model.clear_agents()`, which would call `agent.remove` on each agent. It needs to run through `agent.remove` rather than just rebuild the model._agents datastructures to ensure that agents are also removed from the experimental cell spaces. | projectmesa/mesa | diff --git a/tests/test_model.py b/tests/test_model.py
index 7c343e46..ff1fe297 100644
--- a/tests/test_model.py
+++ b/tests/test_model.py
@@ -93,3 +93,18 @@ def test_agents_by_type():
assert model.agents_by_type[Wolf] == AgentSet([wolf], model)
assert model.agents_by_type[Sheep] == AgentSet([sheep], model)
assert len(model.agents_by_type) == 2
+
+
+def test_agent_remove():
+ """Test removing all agents from the model."""
+
+ class TestAgent(Agent):
+ pass
+
+ model = Model()
+ for _ in range(100):
+ TestAgent(model)
+ assert len(model.agents) == 100
+
+ model.remove_all_agents()
+ assert len(model.agents) == 0
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest -rA -Werror -Wdefault::FutureWarning tests/"
} | accessible-pygments==0.0.5
alabaster==1.0.0
anyio==4.9.0
arrow==1.3.0
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
binaryornot==0.4.4
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
comm==0.2.2
contourpy==1.3.1
cookiecutter==2.6.0
coverage==7.8.0
cycler==0.12.1
debugpy==1.8.13
decorator==5.2.1
docutils==0.21.2
exceptiongroup==1.2.2
executing==2.2.0
fastjsonschema==2.21.1
filelock==3.18.0
fonttools==4.56.0
greenlet==3.1.1
h11==0.14.0
humanize==4.12.2
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==8.34.0
ipyvue==1.11.2
ipyvuetify==1.11.1
ipywidgets==8.1.5
jedi==0.19.2
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-cache==1.0.1
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterlab_widgets==3.0.13
kiwisolver==1.4.8
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
matplotlib-inline==0.1.7
mdit-py-plugins==0.4.2
mdurl==0.1.2
-e git+https://github.com/projectmesa/mesa.git@2dedca4a8fa7d9bd27e0b2942584009443524aee#egg=Mesa
myst-nb==1.2.0
myst-parser==4.0.1
nbclient==0.10.2
nbformat==5.10.4
nest-asyncio==1.6.0
networkx==3.4.2
numpy==2.2.4
packaging==24.2
pandas==2.2.3
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pydata-sphinx-theme==0.16.1
Pygments==2.19.1
pymdown-extensions==10.14.3
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
python-slugify==8.0.4
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
reacton==1.9.1
referencing==0.36.2
requests==2.32.3
rich==14.0.0
rich-click==1.8.8
rpds-py==0.24.0
ruff==0.11.2
scipy==1.15.2
seaborn==0.13.2
six==1.17.0
sniffio==1.3.1
snowballstemmer==2.2.0
solara==1.44.1
solara-server==1.44.1
solara-ui==1.44.1
soupsieve==2.6
Sphinx==8.1.3
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
stack-data==0.6.3
starlette==0.46.1
tabulate==0.9.0
text-unidecode==1.3
tomli==2.2.1
tornado==6.4.2
tqdm==4.67.1
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
uvicorn==0.34.0
watchdog==6.0.0
watchfiles==1.0.4
wcwidth==0.2.13
websockets==15.0.1
widgetsnbextension==4.0.13
zipp==3.21.0
| name: mesa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- accessible-pygments==0.0.5
- alabaster==1.0.0
- anyio==4.9.0
- arrow==1.3.0
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- binaryornot==0.4.4
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- comm==0.2.2
- contourpy==1.3.1
- cookiecutter==2.6.0
- coverage==7.8.0
- cycler==0.12.1
- debugpy==1.8.13
- decorator==5.2.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- executing==2.2.0
- fastjsonschema==2.21.1
- filelock==3.18.0
- fonttools==4.56.0
- greenlet==3.1.1
- h11==0.14.0
- humanize==4.12.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==8.34.0
- ipyvue==1.11.2
- ipyvuetify==1.11.1
- ipywidgets==8.1.5
- jedi==0.19.2
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-cache==1.0.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyterlab-widgets==3.0.13
- kiwisolver==1.4.8
- markdown==3.7
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.10.1
- matplotlib-inline==0.1.7
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- mesa==3.0.0b1
- myst-nb==1.2.0
- myst-parser==4.0.1
- nbclient==0.10.2
- nbformat==5.10.4
- nest-asyncio==1.6.0
- networkx==3.4.2
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pydata-sphinx-theme==0.16.1
- pygments==2.19.1
- pymdown-extensions==10.14.3
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- python-slugify==8.0.4
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- reacton==1.9.1
- referencing==0.36.2
- requests==2.32.3
- rich==14.0.0
- rich-click==1.8.8
- rpds-py==0.24.0
- ruff==0.11.2
- scipy==1.15.2
- seaborn==0.13.2
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- solara==1.44.1
- solara-server==1.44.1
- solara-ui==1.44.1
- soupsieve==2.6
- sphinx==8.1.3
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- stack-data==0.6.3
- starlette==0.46.1
- tabulate==0.9.0
- text-unidecode==1.3
- tomli==2.2.1
- tornado==6.4.2
- tqdm==4.67.1
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- uvicorn==0.34.0
- watchdog==6.0.0
- watchfiles==1.0.4
- wcwidth==0.2.13
- websockets==15.0.1
- widgetsnbextension==4.0.13
- zipp==3.21.0
prefix: /opt/conda/envs/mesa
| [
"tests/test_model.py::test_agent_remove"
] | [] | [
"tests/test_model.py::test_model_set_up",
"tests/test_model.py::test_running",
"tests/test_model.py::test_seed",
"tests/test_model.py::test_reset_randomizer",
"tests/test_model.py::test_reset_rng",
"tests/test_model.py::test_agent_types",
"tests/test_model.py::test_agents_by_type"
] | [] | Apache License 2.0 | 20,002 | 391 | [
"mesa/agent.py",
"mesa/model.py"
] |
google__flax-4317 | c1046ad08f885e3ff35e6fdec2bd0b6495e04e22 | 2024-10-21 09:50:38 | b5d4ed85d376c592d6265895ef7d71830d4570a0 | diff --git a/flax/nnx/statelib.py b/flax/nnx/statelib.py
index 59461fc4..3e27937d 100644
--- a/flax/nnx/statelib.py
+++ b/flax/nnx/statelib.py
@@ -179,11 +179,17 @@ class State(MutableMapping[K, V], reprlib.Representable):
def replace_by_pure_dict(self,
pure_dict: dict[str, tp.Any],
replace_fn: SetValueFn | None = None):
+ def try_convert_int(x):
+ try:
+ return int(x)
+ except ValueError:
+ return x
# Works for nnx.Variable and nnx.VariableState
if replace_fn is None:
replace_fn = lambda x, v: x.replace(v) if hasattr(x, 'replace') else v
current_flat = self.flat_state()
for kp, v in traversals.flatten_mapping(pure_dict).items():
+ kp = tuple(map(try_convert_int, kp))
if kp not in current_flat:
raise ValueError(f'key in pure_dict not available in state: {kp}')
current_flat[kp] = replace_fn(current_flat[kp], v)
| Issue on restoring NNX state from Orbax checkpoint as pure dict
Dear team,
I'm having an issue restoring pure dict from nnx.state,
Orbax will change the key has int type into string type.
Is there any way to fix it?
I put a sample code that can reproduce the issue.
```python
from flax import nnx
import orbax.checkpoint as ocp
import jax
class MLPs(nnx.Module):
def __init__(self, dim, rngs: nnx.Rngs):
self.layers = []
for _ in range(4):
self.layers.append(nnx.Linear(dim, dim, rngs=rngs, use_bias=False))
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
def test1():
model = MLPs(4, rngs=nnx.Rngs(0))
x = jax.random.normal(jax.random.key(42), (3, 4))
assert model(x).shape == (3, 4)
_, state = nnx.split(model)
pure_dict_state = state.to_pure_dict()
nnx.display(pure_dict_state)
ckpt_dir = ocp.test_utils.erase_and_create_empty('/tmp/my-checkpoints/')
checkpointer = ocp.StandardCheckpointer()
# checkpointer.save(ckpt_dir / 'state', state)
checkpointer.save(ckpt_dir / 'pure_dict', pure_dict_state)
# Restore as a pure dictionary.
restored_pure_dict = checkpointer.restore(ckpt_dir / 'pure_dict')
nnx.display(restored_pure_dict)
abstract_model = nnx.eval_shape(lambda: MLPs(4, rngs=nnx.Rngs(0)))
graphdef, abstract_state = nnx.split(abstract_model)
abstract_state.replace_by_pure_dict(restored_pure_dict)
model = nnx.merge(graphdef, abstract_state)
assert model(x).shape == (3, 4) # The model still works!
if __name__ == '__main__' :
test1()
```
This code throws the error following
`ValueError: key in pure_dict not available in state: ('layers', '0', 'bias') `
| google/flax | diff --git a/tests/nnx/integration_test.py b/tests/nnx/integration_test.py
index 1742e379..7b572f4b 100644
--- a/tests/nnx/integration_test.py
+++ b/tests/nnx/integration_test.py
@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import tempfile
import typing as tp
from absl.testing import absltest
import jax
import jax.numpy as jnp
import numpy as np
+import orbax.checkpoint as ocp
from flax import nnx
@@ -259,6 +261,44 @@ class TestIntegration(absltest.TestCase):
assert 'y' in intermediates
+ def test_replace_by_pure_dict(self):
+ class MLPs(nnx.Module):
+ def __init__(self, dim, rngs: nnx.Rngs):
+ self.layers = []
+ for _ in range(4):
+ self.layers.append(nnx.Linear(dim, dim, rngs=rngs, use_bias=False))
+
+ def __call__(self, x):
+ for layer in self.layers:
+ x = layer(x)
+ return x
+
+ model = MLPs(4, rngs=nnx.Rngs(0))
+ x = jax.random.normal(jax.random.key(42), (3, 4))
+ assert model(x).shape == (3, 4)
+
+ _, state = nnx.split(model)
+ pure_dict_state = state.to_pure_dict()
+ nnx.display(pure_dict_state)
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ ckpt_dir = ocp.test_utils.erase_and_create_empty(
+ tmpdir + '/my-checkpoints/'
+ )
+ checkpointer = ocp.StandardCheckpointer()
+ # checkpointer.save(ckpt_dir / 'state', state)
+ checkpointer.save(ckpt_dir / 'pure_dict', pure_dict_state)
+
+ # Restore as a pure dictionary.
+ restored_pure_dict = checkpointer.restore(ckpt_dir / 'pure_dict')
+ nnx.display(restored_pure_dict)
+
+ abstract_model = nnx.eval_shape(lambda: MLPs(4, rngs=nnx.Rngs(0)))
+ graphdef, abstract_state = nnx.split(abstract_model)
+ abstract_state.replace_by_pure_dict(restored_pure_dict)
+ model = nnx.merge(graphdef, abstract_state)
+ assert model(x).shape == (3, 4) # The model still works!
+
if __name__ == '__main__':
absltest.main()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | absl-py==2.2.1
chex==0.1.89
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
etils==1.12.2
exceptiongroup==1.2.2
execnet==2.1.1
-e git+https://github.com/google/flax.git@c1046ad08f885e3ff35e6fdec2bd0b6495e04e22#egg=flax
fonttools==4.56.0
fsspec==2025.3.1
humanize==4.12.2
importlib_resources==6.5.2
iniconfig==2.1.0
jax==0.5.3
jaxlib==0.5.3
kiwisolver==1.4.8
markdown-it-py==3.0.0
matplotlib==3.10.1
mdurl==0.1.2
ml_dtypes==0.5.1
msgpack==1.1.0
nest-asyncio==1.6.0
numpy==2.2.4
opt_einsum==3.4.0
optax==0.2.4
orbax-checkpoint==0.11.10
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
protobuf==6.30.2
Pygments==2.19.1
pyparsing==3.2.3
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
rich==14.0.0
scipy==1.15.2
simplejson==3.20.1
six==1.17.0
tensorstore==0.1.73
tomli==2.2.1
toolz==1.0.0
typing_extensions==4.13.0
zipp==3.21.0
| name: flax
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- absl-py==2.2.1
- chex==0.1.89
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- etils==1.12.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- flax==0.10.0
- fonttools==4.56.0
- fsspec==2025.3.1
- humanize==4.12.2
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jax==0.5.3
- jaxlib==0.5.3
- kiwisolver==1.4.8
- markdown-it-py==3.0.0
- matplotlib==3.10.1
- mdurl==0.1.2
- ml-dtypes==0.5.1
- msgpack==1.1.0
- nest-asyncio==1.6.0
- numpy==2.2.4
- opt-einsum==3.4.0
- optax==0.2.4
- orbax-checkpoint==0.11.10
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- protobuf==6.30.2
- pygments==2.19.1
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- rich==14.0.0
- scipy==1.15.2
- simplejson==3.20.1
- six==1.17.0
- tensorstore==0.1.73
- tomli==2.2.1
- toolz==1.0.0
- typing-extensions==4.13.0
- zipp==3.21.0
prefix: /opt/conda/envs/flax
| [
"tests/nnx/integration_test.py::TestIntegration::test_replace_by_pure_dict"
] | [] | [
"tests/nnx/integration_test.py::TestIntegration::test_functional_example",
"tests/nnx/integration_test.py::TestIntegration::test_intermediates_example",
"tests/nnx/integration_test.py::TestIntegration::test_intermediates_example_functional",
"tests/nnx/integration_test.py::TestIntegration::test_shared_modules",
"tests/nnx/integration_test.py::TestIntegration::test_shared_modules_pure",
"tests/nnx/integration_test.py::TestIntegration::test_stateful_example"
] | [] | Apache License 2.0 | 20,003 | 280 | [
"flax/nnx/statelib.py"
] |
|
dag-hammarskjold-library__dlx-402 | 5bdb082b8296696afc9c7398822bce8438f59b1e | 2024-10-21 15:10:39 | 7bc9c436adc657b48945eafa70a190381864e26c | diff --git a/dlx/scripts/init_indexes.py b/dlx/scripts/init_indexes.py
index 7d99357..ad89d0e 100644
--- a/dlx/scripts/init_indexes.py
+++ b/dlx/scripts/init_indexes.py
@@ -56,7 +56,7 @@ def run():
collation=Config.marc_index_default_collation
)
)
- indexes.append(col.create_index('_record_type', collation=Config.marc_index_default_collation))
+ indexes.append(col.create_index('_record_type_collated', collation=Config.marc_index_default_collation))
print(f'creating text indexes...')
indexes.append(col.create_index('words', collation=Config.marc_index_default_collation))
diff --git a/dlx/util.py b/dlx/util.py
index 37de978..4d2c016 100644
--- a/dlx/util.py
+++ b/dlx/util.py
@@ -61,7 +61,12 @@ class Table():
def set(self, rowx, field_name, value):
self.index.setdefault(rowx, {})
self.index[rowx].setdefault(field_name, {})
- self.index[rowx][field_name] = value
+
+ if existing := self.index[rowx].get(field_name):
+ # if this is a repeated subfield, append the next value
+ self.index[rowx][field_name] += f'||{value}'
+ else:
+ self.index[rowx][field_name] = value
if field_name not in self.header:
self.header.append(field_name)
| Handle repeated subfields in CSV/TSV export
Currently, in cases of fields with repeated subfield codes, only one of the subfields makes it into the serialization. This is due to the tabular nature of the serialization.
- [ ] Concatenate the values of repeated subfields into one string joined by a separator i.e. ";" or "||" | dag-hammarskjold-library/dlx | diff --git a/tests/test_marcset.py b/tests/test_marcset.py
index 21bac69..d7ff4b8 100644
--- a/tests/test_marcset.py
+++ b/tests/test_marcset.py
@@ -168,7 +168,7 @@ def test_to_csv(db):
bibset = BibSet.from_query({})
bibset.records = list(bibset.records)
#[bib.set('001', None, str(bib.id)) for bib in bibset.records]
- assert bibset.to_csv(write_id=True) == '1.001,1.245$a,1.245$b,1.245$c,1.520$a,2.520$a,1.650$a,1.710$a\n1,This,is the,title,Description,Repeated subfield,Header,Another header\n2,Another,is the,title,,,Header,'
+ assert bibset.to_csv(write_id=True) == '1.001,1.245$a,1.245$b,1.245$c,1.520$a,2.520$a,1.650$a,1.710$a\n1,This,is the,title,Description,Another description||Repeated subfield,Header,Another header\n2,Another,is the,title,,,Header,'
# comma and quote handling
bibs = BibSet()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==23.2.0
boto3==1.34.100
botocore==1.34.100
certifi==2024.7.4
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
cryptography==43.0.1
-e git+https://github.com/dag-hammarskjold-library/dlx.git@5bdb082b8296696afc9c7398822bce8438f59b1e#egg=dlx
dnspython==2.6.1
exceptiongroup==1.2.1
idna==3.7
iniconfig==2.0.0
Jinja2==3.1.4
jmespath==1.0.1
joblib==1.4.2
jsonschema==4.0.0
lxml==5.2.1
MarkupSafe==2.1.5
mongomock==4.1.2
moto==5.0.8
nltk==3.9.1
packaging==24.0
pluggy==1.5.0
pycparser==2.22
pymongo==4.6.3
pyrsistent==0.20.0
pytest==8.2.0
python-dateutil==2.9.0.post0
pytz==2024.1
PyYAML==6.0.1
regex==2024.9.11
requests==2.32.3
responses==0.25.0
s3transfer==0.10.3
sentinels==1.0.0
six==1.16.0
tomli==2.0.1
tqdm==4.66.5
urllib3==1.26.19
Werkzeug==3.0.4
xlrd==2.0.1
xmldiff==2.4
xmltodict==0.13.0
| name: dlx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==23.2.0
- boto3==1.34.100
- botocore==1.34.100
- certifi==2024.7.4
- cffi==1.16.0
- charset-normalizer==3.3.2
- click==8.1.7
- cryptography==43.0.1
- dnspython==2.6.1
- exceptiongroup==1.2.1
- idna==3.7
- iniconfig==2.0.0
- jinja2==3.1.4
- jmespath==1.0.1
- joblib==1.4.2
- jsonschema==4.0.0
- lxml==5.2.1
- markupsafe==2.1.5
- mongomock==4.1.2
- moto==5.0.8
- nltk==3.9.1
- packaging==24.0
- pluggy==1.5.0
- pycparser==2.22
- pymongo==4.6.3
- pyrsistent==0.20.0
- pytest==8.2.0
- python-dateutil==2.9.0.post0
- pytz==2024.1
- pyyaml==6.0.1
- regex==2024.9.11
- requests==2.32.3
- responses==0.25.0
- s3transfer==0.10.3
- sentinels==1.0.0
- six==1.16.0
- tomli==2.0.1
- tqdm==4.66.5
- urllib3==1.26.19
- werkzeug==3.0.4
- xlrd==2.0.1
- xmldiff==2.4
- xmltodict==0.13.0
prefix: /opt/conda/envs/dlx
| [
"tests/test_marcset.py::test_to_csv"
] | [] | [
"tests/test_marcset.py::test_mocked",
"tests/test_marcset.py::test_init",
"tests/test_marcset.py::test_iterate",
"tests/test_marcset.py::test_from_query",
"tests/test_marcset.py::test_from_ids",
"tests/test_marcset.py::test_from_table",
"tests/test_marcset.py::test_from_mrk",
"tests/test_marcset.py::test_from_xml",
"tests/test_marcset.py::test_to_mrc",
"tests/test_marcset.py::test_to_mrk",
"tests/test_marcset.py::test_to_xml",
"tests/test_marcset.py::test_xml_encoding",
"tests/test_marcset.py::test_to_str",
"tests/test_marcset.py::test_from_aggregation"
] | [] | null | 20,009 | 360 | [
"dlx/scripts/init_indexes.py",
"dlx/util.py"
] |
|
IAMconsortium__nomenclature-419 | 7913ff11b22dc6af9a7fbff4d79f66e62d854378 | 2024-10-22 15:40:55 | cf930aa4fc3782f53bfa7a1c20d3f045d810cd56 | dc-almeida: Thanks for the hawk-eye as always!
phackstock: > Thanks for the hawk-eye as always!
You're welcome, always easier to spot these things in other people's work :D
phackstock: Before we go ahead with the merge if I understood our discussion in #419 correctly, this PR is still missing a dimensions option so that you can specify which dimension you want to validate, correct @danielhuppmann?
danielhuppmann: I have a suggestion: we merge as is, and then @dc-almeida does a follow-up PR adding the dimensions for this CLI, and also extending the tests for the `validate-scenario` and `validate-project` to make sure that all three cases work as expected:
- explicit `dimensions` argument (this is currently tested in `test_cli_custom_dimensions_runs()`)
- if not given, use `dimensions` argument set from `nomenclature.yaml` (not sure if this is tested)
- if also not given, check all available folders
phackstock: Sounds good to me. | diff --git a/nomenclature/cli.py b/nomenclature/cli.py
index 33cdafc..18ef895 100644
--- a/nomenclature/cli.py
+++ b/nomenclature/cli.py
@@ -277,3 +277,29 @@ def cli_run_workflow(
df = getattr(workflow, workflow_function)(IamDataFrame(input_file))
if output_file is not None:
df.to_excel(output_file)
+
+
[email protected]("validate-scenarios")
[email protected]("input_file", type=click.Path(exists=True, path_type=Path))
[email protected](
+ "--definitions",
+ help="Optional name for definitions folder",
+ type=click.Path(exists=True, path_type=Path),
+ default="definitions",
+)
+def cli_validate_scenarios(input_file: Path, definitions: Path):
+ """Validate a scenario file against the codelists of a project
+
+ Parameters
+ ----------
+ input_file : Path
+ Input data file, must be IAMC format, .xlsx or .csv
+ definitions : Path
+ Definitions folder with codelists, by default "definitions"
+
+ Raises
+ ------
+ ValueError
+ If input_file validation fails against specified codelist(s).
+ """
+ DataStructureDefinition(definitions).validate(IamDataFrame(input_file))
| CLI to validate a scenario data file
To make it easier for non-expert users to check their data against a DataStructureDefinition, we need a simple CLI that takes an input file (IAMC scenario data) and validates it against a the definitions.
Basically, this should be a reduced version of `cli_run_workflow`, but only take an `input file` (required) and a `definitions` folder (default to 'definitions').
| IAMconsortium/nomenclature | diff --git a/tests/test_cli.py b/tests/test_cli.py
index dd1b342..9eaee1f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -420,3 +420,32 @@ def test_cli_run_workflow(tmp_path, simple_df):
)
assert_iamframe_equal(simple_df, IamDataFrame(tmp_path / "output.xlsx"))
+
+
[email protected](
+ "status, unit, exit_code", [("valid", "EJ/yr", 0), ("invalid", "EJ", 1)]
+)
+def test_cli_valid_scenarios(status, unit, exit_code, tmp_path):
+ """Check that CLI validates an IAMC dataset according to defined codelist."""
+ IamDataFrame(
+ pd.DataFrame(
+ [
+ ["m_a", "s_a", "World", "Primary Energy", unit, 1, 2],
+ ],
+ columns=IAMC_IDX + [2005, 2010],
+ )
+ ).to_excel(tmp_path / f"{status}_data.xlsx")
+ result_valid = runner.invoke(
+ cli,
+ [
+ "validate-scenarios",
+ str(tmp_path / f"{status}_data.xlsx"),
+ "--definitions",
+ str(
+ MODULE_TEST_DATA_DIR
+ / "structure_validation_no_mappings"
+ / "definitions"
+ ),
+ ],
+ )
+ assert result_valid.exit_code == exit_code
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 0.19 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"mypy",
"flake8",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alembic==1.15.2
annotated-types==0.7.0
anyio==4.9.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
et_xmlfile==2.0.0
exceptiongroup==1.2.2
execnet==2.1.1
fastapi==0.115.12
flake8==7.2.0
flexcache==0.3
flexparser==0.4
fonttools==4.56.0
gitdb==4.0.12
GitPython==3.1.44
greenlet==3.1.1
h11==0.14.0
h2==4.2.0
hpack==4.1.0
httpcore==1.0.7
httpx==0.28.1
hyperframe==6.1.0
iam-units==2023.9.12
idna==3.10
iniconfig==2.1.0
ixmp4==0.9.8
kiwisolver==1.4.8
Mako==1.3.9
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
mccabe==0.7.0
mdurl==0.1.2
mypy==1.15.0
mypy-extensions==1.0.0
-e git+https://github.com/IAMconsortium/nomenclature.git@7913ff11b22dc6af9a7fbff4d79f66e62d854378#egg=nomenclature_iamc
numpy==1.26.4
openpyxl==3.1.5
packaging==24.2
pandas==2.2.3
pandera==0.23.1
pillow==11.1.0
Pint==0.24.4
platformdirs==4.3.7
pluggy==1.5.0
psycopg==3.2.6
psycopg-binary==3.2.6
pyam-iamc==2.3.0
pycodestyle==2.13.0
pycountry==23.12.11
pydantic==2.11.1
pydantic-settings==2.8.1
pydantic_core==2.33.0
pyflakes==3.3.2
Pygments==2.19.1
PyJWT==2.10.1
pyparsing==3.2.3
pysquirrel==1.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
rich==14.0.0
scipy==1.15.2
seaborn==0.13.2
shellingham==1.5.4
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
SQLAlchemy==2.0.40
SQLAlchemy-Utils==0.41.2
starlette==0.46.1
toml==0.10.2
tomli==2.2.1
typeguard==4.4.2
typer==0.15.2
typing-inspect==0.9.0
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
wquantiles==0.6
XlsxWriter==3.2.2
| name: nomenclature
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alembic==1.15.2
- annotated-types==0.7.0
- anyio==4.9.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- et-xmlfile==2.0.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- fastapi==0.115.12
- flake8==7.2.0
- flexcache==0.3
- flexparser==0.4
- fonttools==4.56.0
- gitdb==4.0.12
- gitpython==3.1.44
- greenlet==3.1.1
- h11==0.14.0
- h2==4.2.0
- hpack==4.1.0
- httpcore==1.0.7
- httpx==0.28.1
- hyperframe==6.1.0
- iam-units==2023.9.12
- idna==3.10
- iniconfig==2.1.0
- ixmp4==0.9.8
- kiwisolver==1.4.8
- mako==1.3.9
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.10.1
- mccabe==0.7.0
- mdurl==0.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- nomenclature-iamc==0.19.2.dev1+7913ff1
- numpy==1.26.4
- openpyxl==3.1.5
- packaging==24.2
- pandas==2.2.3
- pandera==0.23.1
- pillow==11.1.0
- pint==0.24.4
- platformdirs==4.3.7
- pluggy==1.5.0
- psycopg==3.2.6
- psycopg-binary==3.2.6
- pyam-iamc==2.3.0
- pycodestyle==2.13.0
- pycountry==23.12.11
- pydantic==2.11.1
- pydantic-core==2.33.0
- pydantic-settings==2.8.1
- pyflakes==3.3.2
- pygments==2.19.1
- pyjwt==2.10.1
- pyparsing==3.2.3
- pysquirrel==1.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- rich==14.0.0
- scipy==1.15.2
- seaborn==0.13.2
- shellingham==1.5.4
- six==1.17.0
- smmap==5.0.2
- sniffio==1.3.1
- sqlalchemy==2.0.40
- sqlalchemy-utils==0.41.2
- starlette==0.46.1
- toml==0.10.2
- tomli==2.2.1
- typeguard==4.4.2
- typer==0.15.2
- typing-extensions==4.13.0
- typing-inspect==0.9.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- wquantiles==0.6
- xlsxwriter==3.2.2
prefix: /opt/conda/envs/nomenclature
| [
"tests/test_cli.py::test_cli_valid_scenarios[valid-EJ/yr-0]",
"tests/test_cli.py::test_cli_valid_scenarios[invalid-EJ-1]"
] | [
"tests/test_cli.py::test_cli_installed"
] | [
"tests/test_cli.py::test_cli_valid_yaml_path",
"tests/test_cli.py::test_cli_valid_yaml",
"tests/test_cli.py::test_cli_valid_yaml_fails",
"tests/test_cli.py::test_cli_valid_project_path",
"tests/test_cli.py::test_cli_valid_project",
"tests/test_cli.py::test_cli_invalid_region",
"tests/test_cli.py::test_cli_valid_project_fails",
"tests/test_cli.py::test_cli_non_default_folders",
"tests/test_cli.py::test_cli_non_default_folders_fails",
"tests/test_cli.py::test_cli_wrong_definitions_name",
"tests/test_cli.py::test_cli_custom_dimensions_runs",
"tests/test_cli.py::test_cli_custom_dimensions_fails",
"tests/test_cli.py::test_cli_empty_dimensions_run",
"tests/test_cli.py::test_cli_empty_dimensions_fails",
"tests/test_cli.py::test_cli_missing_mappings_runs",
"tests/test_cli.py::test_cli_missing_mappings_fails",
"tests/test_cli.py::test_cli_validate_data_fails",
"tests/test_cli.py::test_cli_empty_definitions_dir",
"tests/test_cli.py::test_check_region_aggregation",
"tests/test_cli.py::test_cli_export_to_excel",
"tests/test_cli.py::test_cli_add_missing_variables",
"tests/test_cli.py::test_cli_run_workflow"
] | [] | Apache License 2.0 | 20,021 | 307 | [
"nomenclature/cli.py"
] |
pyro-ppl__numpyro-1894 | 8ace34f1aca094897328730853b021d53b230f65 | 2024-10-23 13:58:21 | 07e4c9bc4524cc0a741ba8f8ad41339d34a0288d | diff --git a/numpyro/distributions/transforms.py b/numpyro/distributions/transforms.py
index 290e504c..05ae19ef 100644
--- a/numpyro/distributions/transforms.py
+++ b/numpyro/distributions/transforms.py
@@ -282,7 +282,7 @@ class AffineTransform(Transform):
def _get_compose_transform_input_event_dim(parts):
input_event_dim = parts[-1].domain.event_dim
- for part in parts[len(parts) - 1 :: -1]:
+ for part in parts[:-1][::-1]:
input_event_dim = part.domain.event_dim + max(
input_event_dim - part.codomain.event_dim, 0
)
| Incorrect event dimension for `ComposeTransform` if transform reduces event dimensions
An example is probably the clearest illustration. If the transform increases the number of event dimensions, everything works as expected.
```python
>>> from numpyro.distributions.transforms import CorrCholeskyTransform, ComposeTransform
>>> part = CorrCholeskyTransform()
>>> part.domain.event_dim, part.codomain.event_dim
(1, 2)
>>> composed = ComposeTransform([part])
>>> composed.domain.event_dim, composed.codomain.event_dim
(1, 2)
```
If the transform reduces the number of event dimensions, wrapping the transform in a `ComposeTransform` leads to unexpected results.
```python
>>> part = CorrCholeskyTransform().inv
>>> part.domain.event_dim, part.codomain.event_dim
(2, 1)
>>> composed = ComposeTransform([part])
>>> composed.domain.event_dim, composed.codomain.event_dim
(3, 1)
```
I had a brief look at the code below, but I couldn't quite get my head around it.
https://github.com/pyro-ppl/numpyro/blob/8ace34f1aca094897328730853b021d53b230f65/numpyro/distributions/transforms.py#L292-L298
Here's a minimal test that can be added to `test/test_transforms.py` to reproduce.
```python
@pytest.mark.parametrize("transform", [
CorrCholeskyTransform(), # passes
CorrCholeskyTransform().inv, # fails
])
def test_compose_domain_codomain(transform):
composed = ComposeTransform([transform])
assert transform.domain.event_dim == composed.domain.event_dim
assert transform.codomain.event_dim == composed.codomain.event_dim
``` | pyro-ppl/numpyro | diff --git a/test/test_transforms.py b/test/test_transforms.py
index cecbf2ca..9d8c6594 100644
--- a/test/test_transforms.py
+++ b/test/test_transforms.py
@@ -397,3 +397,16 @@ def test_biject_to(constraint, shape):
expected_shape = constrained.shape[: constrained.ndim - constraint.event_dim]
assert passed.shape == expected_shape
assert jnp.all(passed)
+
+
[email protected](
+ "transform",
+ [
+ CorrCholeskyTransform(),
+ CorrCholeskyTransform().inv,
+ ],
+)
+def test_compose_domain_codomain(transform):
+ composed = ComposeTransform([transform])
+ assert transform.domain.event_dim == composed.domain.event_dim
+ assert transform.codomain.event_dim == composed.codomain.event_dim
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev,test,doc,examples]",
"log_parser": "parse_log_pytest_v2",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-xdist"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest -vs --durations=100 --ignore=test/infer/ --ignore=test/contrib/"
} | absl-py==2.2.1
alabaster==0.7.16
anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
arviz==0.17.1
asttokens==3.0.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chex==0.1.89
cloudpickle==3.1.1
comm==0.2.2
contourpy==1.3.0
cycler==0.12.1
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
dm-haiku==0.0.13
dm-tree==0.1.8
docutils==0.21.2
etils==1.5.2
exceptiongroup==1.2.2
execnet==2.1.1
executing==2.2.0
fastjsonschema==2.21.1
flax==0.8.5
fonttools==4.56.0
fqdn==1.5.1
fsspec==2025.3.1
funsor==0.4.5
gast==0.6.0
graphviz==0.20.3
h11==0.14.0
h5netcdf==1.6.1
h5py==3.13.0
httpcore==1.0.7
httpx==0.28.1
humanize==4.12.2
idna==3.10
imagesize==1.4.1
importlib-metadata==4.13.0
importlib_resources==6.5.2
iniconfig==2.1.0
ipykernel==6.29.5
ipython==8.18.1
ipywidgets==8.1.5
isoduration==20.11.0
jax==0.4.30
jaxlib==0.4.30
jaxns==2.6.3
jaxopt==0.8.3
jedi==0.19.2
Jinja2==3.1.6
jmp==0.0.4
joblib==1.4.2
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-console==6.6.3
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.3.6
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
kiwisolver==1.4.7
makefun==1.15.6
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
matplotlib-inline==0.1.7
mdurl==0.1.2
mistune==3.1.3
ml_dtypes==0.5.1
msgpack==1.1.0
multipledispatch==1.0.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nbsphinx==0.9.7
nest-asyncio==1.6.0
notebook==7.3.3
notebook_shim==0.2.4
numpy==1.26.4
-e git+https://github.com/pyro-ppl/numpyro.git@8ace34f1aca094897328730853b021d53b230f65#egg=numpyro
opt_einsum==3.4.0
optax==0.2.4
orbax-checkpoint==0.6.4
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
protobuf==6.30.2
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pycparser==2.22
Pygments==2.19.1
pylab-sdk==1.7.2
pyparsing==3.2.3
pyro-api==0.1.2
pytest==8.3.5
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
readthedocs-sphinx-search==0.3.2
referencing==0.36.2
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rich==14.0.0
rpds-py==0.24.0
ruff==0.11.2
scikit-learn==1.6.1
scipy==1.13.1
seaborn==0.13.2
Send2Trash==1.8.3
six==1.17.0
sniffio==1.3.1
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.3.7
sphinx-gallery==0.19.0
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
tabulate==0.9.0
tensorflow-probability==0.25.0
tensorstore==0.1.69
terminado==0.18.1
threadpoolctl==3.6.0
tinycss2==1.4.0
tomli==2.2.1
toolz==1.0.0
tornado==6.4.2
tqdm==4.67.1
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
wordcloud==1.9.4
xarray==2024.7.0
xarray-einstats==0.7.0
zipp==3.21.0
| name: numpyro
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- absl-py==2.2.1
- alabaster==0.7.16
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- arviz==0.17.1
- asttokens==3.0.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chex==0.1.89
- cloudpickle==3.1.1
- comm==0.2.2
- contourpy==1.3.0
- cycler==0.12.1
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- dm-haiku==0.0.13
- dm-tree==0.1.8
- docutils==0.21.2
- etils==1.5.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- executing==2.2.0
- fastjsonschema==2.21.1
- flax==0.8.5
- fonttools==4.56.0
- fqdn==1.5.1
- fsspec==2025.3.1
- funsor==0.4.5
- gast==0.6.0
- graphviz==0.20.3
- h11==0.14.0
- h5netcdf==1.6.1
- h5py==3.13.0
- httpcore==1.0.7
- httpx==0.28.1
- humanize==4.12.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.13.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==8.18.1
- ipywidgets==8.1.5
- isoduration==20.11.0
- jax==0.4.30
- jaxlib==0.4.30
- jaxns==2.6.3
- jaxopt==0.8.3
- jedi==0.19.2
- jinja2==3.1.6
- jmp==0.0.4
- joblib==1.4.2
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter==1.1.1
- jupyter-client==8.6.3
- jupyter-console==6.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.3.6
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==3.0.13
- kiwisolver==1.4.7
- makefun==1.15.6
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- matplotlib-inline==0.1.7
- mdurl==0.1.2
- mistune==3.1.3
- ml-dtypes==0.5.1
- msgpack==1.1.0
- multipledispatch==1.0.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nbsphinx==0.9.7
- nest-asyncio==1.6.0
- notebook==7.3.3
- notebook-shim==0.2.4
- numpy==1.26.4
- numpyro==0.15.3
- opt-einsum==3.4.0
- optax==0.2.4
- orbax-checkpoint==0.6.4
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- protobuf==6.30.2
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pycparser==2.22
- pygments==2.19.1
- pylab-sdk==1.7.2
- pyparsing==3.2.3
- pyro-api==0.1.2
- pytest==8.3.5
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- readthedocs-sphinx-search==0.3.2
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rich==14.0.0
- rpds-py==0.24.0
- ruff==0.11.2
- scikit-learn==1.6.1
- scipy==1.13.1
- seaborn==0.13.2
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.3.7
- sphinx-gallery==0.19.0
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- tabulate==0.9.0
- tensorflow-probability==0.25.0
- tensorstore==0.1.69
- terminado==0.18.1
- threadpoolctl==3.6.0
- tinycss2==1.4.0
- tomli==2.2.1
- toolz==1.0.0
- tornado==6.4.2
- tqdm==4.67.1
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- wcwidth==0.2.13
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==4.0.13
- wordcloud==1.9.4
- xarray==2024.7.0
- xarray-einstats==0.7.0
- zipp==3.21.0
prefix: /opt/conda/envs/numpyro
| [
"test/test_transforms.py::test_compose_domain_codomain[transform1]"
] | [] | [
"test/test_transforms.py::test_parametrized_transform_pytree[affine]",
"test/test_transforms.py::test_parametrized_transform_pytree[compose]",
"test/test_transforms.py::test_parametrized_transform_pytree[independent]",
"test/test_transforms.py::test_parametrized_transform_pytree[lower_cholesky_affine]",
"test/test_transforms.py::test_parametrized_transform_pytree[permute]",
"test/test_transforms.py::test_parametrized_transform_pytree[power]",
"test/test_transforms.py::test_parametrized_transform_pytree[rfft]",
"test/test_transforms.py::test_parametrized_transform_pytree[recursive_linear]",
"test/test_transforms.py::test_parametrized_transform_pytree[simplex_to_ordered]",
"test/test_transforms.py::test_parametrized_transform_pytree[unpack]",
"test/test_transforms.py::test_parametrized_transform_pytree[abs]",
"test/test_transforms.py::test_parametrized_transform_pytree[cholesky]",
"test/test_transforms.py::test_parametrized_transform_pytree[corr_chol]",
"test/test_transforms.py::test_parametrized_transform_pytree[corr_matrix_chol]",
"test/test_transforms.py::test_parametrized_transform_pytree[exp]",
"test/test_transforms.py::test_parametrized_transform_pytree[identity]",
"test/test_transforms.py::test_parametrized_transform_pytree[l1_ball]",
"test/test_transforms.py::test_parametrized_transform_pytree[lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_pytree[ordered]",
"test/test_transforms.py::test_parametrized_transform_pytree[scaled_unit_lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_pytree[sigmoid]",
"test/test_transforms.py::test_parametrized_transform_pytree[softplus]",
"test/test_transforms.py::test_parametrized_transform_pytree[softplus_lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_pytree[stick_breaking]",
"test/test_transforms.py::test_parametrized_transform_pytree[iaf]",
"test/test_transforms.py::test_parametrized_transform_pytree[bna]",
"test/test_transforms.py::test_parametrized_transform_pytree[reshape]",
"test/test_transforms.py::test_parametrized_transform_pytree[zero_sum]",
"test/test_transforms.py::test_parametrized_transform_eq[affine]",
"test/test_transforms.py::test_parametrized_transform_eq[compose]",
"test/test_transforms.py::test_parametrized_transform_eq[independent]",
"test/test_transforms.py::test_parametrized_transform_eq[lower_cholesky_affine]",
"test/test_transforms.py::test_parametrized_transform_eq[permute]",
"test/test_transforms.py::test_parametrized_transform_eq[power]",
"test/test_transforms.py::test_parametrized_transform_eq[rfft]",
"test/test_transforms.py::test_parametrized_transform_eq[recursive_linear]",
"test/test_transforms.py::test_parametrized_transform_eq[simplex_to_ordered]",
"test/test_transforms.py::test_parametrized_transform_eq[unpack]",
"test/test_transforms.py::test_parametrized_transform_eq[abs]",
"test/test_transforms.py::test_parametrized_transform_eq[cholesky]",
"test/test_transforms.py::test_parametrized_transform_eq[corr_chol]",
"test/test_transforms.py::test_parametrized_transform_eq[corr_matrix_chol]",
"test/test_transforms.py::test_parametrized_transform_eq[exp]",
"test/test_transforms.py::test_parametrized_transform_eq[identity]",
"test/test_transforms.py::test_parametrized_transform_eq[l1_ball]",
"test/test_transforms.py::test_parametrized_transform_eq[lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_eq[ordered]",
"test/test_transforms.py::test_parametrized_transform_eq[scaled_unit_lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_eq[sigmoid]",
"test/test_transforms.py::test_parametrized_transform_eq[softplus]",
"test/test_transforms.py::test_parametrized_transform_eq[softplus_lower_cholesky]",
"test/test_transforms.py::test_parametrized_transform_eq[stick_breaking]",
"test/test_transforms.py::test_parametrized_transform_eq[iaf]",
"test/test_transforms.py::test_parametrized_transform_eq[bna]",
"test/test_transforms.py::test_parametrized_transform_eq[reshape]",
"test/test_transforms.py::test_parametrized_transform_eq[zero_sum]",
"test/test_transforms.py::test_reshape_transform[forward_shape0-inverse_shape0-batch_shape0]",
"test/test_transforms.py::test_reshape_transform[forward_shape1-inverse_shape1-batch_shape1]",
"test/test_transforms.py::test_reshape_transform[forward_shape2-inverse_shape2-batch_shape2]",
"test/test_transforms.py::test_reshape_transform[forward_shape3-inverse_shape3-batch_shape3]",
"test/test_transforms.py::test_reshape_transform_invalid",
"test/test_transforms.py::test_real_fast_fourier_transform[input_shape0-None-1]",
"test/test_transforms.py::test_real_fast_fourier_transform[input_shape1-11-1]",
"test/test_transforms.py::test_real_fast_fourier_transform[input_shape2-None-2]",
"test/test_transforms.py::test_real_fast_fourier_transform[input_shape3-shape3-2]",
"test/test_transforms.py::test_bijective_transforms[transform0-shape0]",
"test/test_transforms.py::test_bijective_transforms[transform1-shape1]",
"test/test_transforms.py::test_bijective_transforms[transform2-shape2]",
"test/test_transforms.py::test_bijective_transforms[transform3-shape3]",
"test/test_transforms.py::test_bijective_transforms[transform4-shape4]",
"test/test_transforms.py::test_bijective_transforms[transform5-shape5]",
"test/test_transforms.py::test_bijective_transforms[transform6-shape6]",
"test/test_transforms.py::test_bijective_transforms[transform7-shape7]",
"test/test_transforms.py::test_bijective_transforms[transform8-shape8]",
"test/test_transforms.py::test_bijective_transforms[transform9-shape9]",
"test/test_transforms.py::test_bijective_transforms[transform10-shape10]",
"test/test_transforms.py::test_bijective_transforms[transform11-shape11]",
"test/test_transforms.py::test_bijective_transforms[transform12-shape12]",
"test/test_transforms.py::test_bijective_transforms[transform13-shape13]",
"test/test_transforms.py::test_bijective_transforms[transform14-shape14]",
"test/test_transforms.py::test_bijective_transforms[transform15-shape15]",
"test/test_transforms.py::test_bijective_transforms[transform16-shape16]",
"test/test_transforms.py::test_bijective_transforms[transform17-shape17]",
"test/test_transforms.py::test_bijective_transforms[transform18-shape18]",
"test/test_transforms.py::test_bijective_transforms[transform19-shape19]",
"test/test_transforms.py::test_bijective_transforms[transform20-shape20]",
"test/test_transforms.py::test_bijective_transforms[transform21-shape21]",
"test/test_transforms.py::test_bijective_transforms[transform22-shape22]",
"test/test_transforms.py::test_bijective_transforms[transform23-shape23]",
"test/test_transforms.py::test_bijective_transforms[transform24-shape24]",
"test/test_transforms.py::test_bijective_transforms[transform25-shape25]",
"test/test_transforms.py::test_batched_recursive_linear_transform",
"test/test_transforms.py::test_biject_to[Complex()-(3,)]",
"test/test_transforms.py::test_biject_to[CorrMatrix()-(15,)]",
"test/test_transforms.py::test_biject_to[GreaterThan(lower_bound=3)-()]",
"test/test_transforms.py::test_biject_to[GreaterThanEq(lower_bound=3)-()]",
"test/test_transforms.py::test_biject_to[L1Ball()-(4,)]",
"test/test_transforms.py::test_biject_to[LessThan(upper_bound=-1)-()]",
"test/test_transforms.py::test_biject_to[LessThanEq(upper_bound=-1)-()]",
"test/test_transforms.py::test_biject_to[LowerCholesky()-(15,)]",
"test/test_transforms.py::test_biject_to[OrderedVector()-(5,)]",
"test/test_transforms.py::test_biject_to[PositiveDefinite()-(6,)]",
"test/test_transforms.py::test_biject_to[PositiveSemiDefinite()-(6,)]",
"test/test_transforms.py::test_biject_to[PositiveOrderedVector()-(7,)]",
"test/test_transforms.py::test_biject_to[Positive(lower_bound=0.0)-(7,)]",
"test/test_transforms.py::test_biject_to[Real()-(3,)]",
"test/test_transforms.py::test_biject_to[ScaledUnitLowerCholesky()-(15,)]",
"test/test_transforms.py::test_biject_to[Simplex()-(3,)]",
"test/test_transforms.py::test_biject_to[SoftplusLowerCholesky()-(15,)]",
"test/test_transforms.py::test_biject_to[SoftplusPositive(lower_bound=0.0)-(2,)]",
"test/test_transforms.py::test_biject_to[Nonnegative(lower_bound=0.0)-(7,)]",
"test/test_transforms.py::test_compose_domain_codomain[transform0]"
] | [] | Apache License 2.0 | 20,030 | 169 | [
"numpyro/distributions/transforms.py"
] |
|
zarr-developers__zarr-python-2434 | 6ce05265472771e922e69012105d2210e3405aa9 | 2024-10-23 15:26:50 | e602aa1d19f26bb06669994231e524c55bcecbeb | dcherian: Is the shape `(1, 0, 1)` supported too?
brokkoli71: @dcherian Yes,` (1, 0, 1)` is supported
dcherian: property tests fixed in https://github.com/scalableminds/zarr-python/pull/4 | diff --git a/src/zarr/core/indexing.py b/src/zarr/core/indexing.py
index f1d5fd16..1873d5c8 100644
--- a/src/zarr/core/indexing.py
+++ b/src/zarr/core/indexing.py
@@ -94,6 +94,8 @@ class Indexer(Protocol):
def ceildiv(a: float, b: float) -> int:
+ if a == 0:
+ return 0
return math.ceil(a / b)
@@ -374,7 +376,7 @@ class SliceDimIndexer:
def __iter__(self) -> Iterator[ChunkDimProjection]:
# figure out the range of chunks we need to visit
- dim_chunk_ix_from = self.start // self.dim_chunk_len
+ dim_chunk_ix_from = 0 if self.start == 0 else self.start // self.dim_chunk_len
dim_chunk_ix_to = ceildiv(self.stop, self.dim_chunk_len)
# iterate over chunks in range
diff --git a/src/zarr/core/metadata/v3.py b/src/zarr/core/metadata/v3.py
index 6b6f28dd..7a38e9fd 100644
--- a/src/zarr/core/metadata/v3.py
+++ b/src/zarr/core/metadata/v3.py
@@ -43,6 +43,13 @@ from zarr.registry import get_codec_class
DEFAULT_DTYPE = "float64"
+# Keep in sync with _replace_special_floats
+SPECIAL_FLOATS_ENCODED = {
+ "Infinity": np.inf,
+ "-Infinity": -np.inf,
+ "NaN": np.nan,
+}
+
def parse_zarr_format(data: object) -> Literal[3]:
if data == 3:
@@ -149,7 +156,7 @@ class V3JsonEncoder(json.JSONEncoder):
if isinstance(out, complex):
# python complex types are not JSON serializable, so we use the
# serialization defined in the zarr v3 spec
- return [out.real, out.imag]
+ return _replace_special_floats([out.real, out.imag])
elif np.isnan(out):
return "NaN"
elif np.isinf(out):
@@ -447,8 +454,11 @@ def parse_fill_value(
if isinstance(fill_value, Sequence) and not isinstance(fill_value, str):
if data_type in (DataType.complex64, DataType.complex128):
if len(fill_value) == 2:
+ decoded_fill_value = tuple(
+ SPECIAL_FLOATS_ENCODED.get(value, value) for value in fill_value
+ )
# complex datatypes serialize to JSON arrays with two elements
- return np_dtype.type(complex(*fill_value))
+ return np_dtype.type(complex(*decoded_fill_value))
else:
msg = (
f"Got an invalid fill value for complex data type {data_type.value}."
@@ -475,12 +485,20 @@ def parse_fill_value(
pass
elif fill_value in ["Infinity", "-Infinity"] and not np.isfinite(casted_value):
pass
- elif np_dtype.kind in "cf":
+ elif np_dtype.kind == "f":
# float comparison is not exact, especially when dtype <float64
- # so we us np.isclose for this comparison.
+ # so we use np.isclose for this comparison.
# this also allows us to compare nan fill_values
if not np.isclose(fill_value, casted_value, equal_nan=True):
raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}")
+ elif np_dtype.kind == "c":
+ # confusingly np.isclose(np.inf, np.inf + 0j) is False on numpy<2, so compare real and imag parts
+ # explicitly.
+ if not (
+ np.isclose(np.real(fill_value), np.real(casted_value), equal_nan=True)
+ and np.isclose(np.imag(fill_value), np.imag(casted_value), equal_nan=True)
+ ):
+ raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}")
else:
if fill_value != casted_value:
raise ValueError(f"fill value {fill_value!r} is not valid for dtype {data_type}")
| [v3] Support zero-sized arrays
### Zarr version
3.0.0a1
### Numcodecs version
0.12.1
### Python Version
3.11.9
### Operating System
Mac
### Installation
pip
### Description
Zero-sized arrays (i.e. arrays where one dimension is zero) are supported in v2, so they should be in v3 too.
### Steps to reproduce
```python
>>> import zarr.v2 as zarr
>>> z = zarr.open(store='example-v2.zarr', mode='w', shape=(3, 0))
>>> z[:] = 2
>>> z[:]
array([], shape=(3, 0), dtype=float64)
>>> import zarr
>>> z = zarr.open(store='example-v3.zarr', mode='w', shape=(3, 0))
>>> z[:] = 3
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tom/workspace/zarr-python/src/zarr/array.py", line 720, in __setitem__
self.set_basic_selection(cast(BasicSelection, pure_selection), value, fields=fields)
File "/Users/tom/workspace/zarr-python/src/zarr/array.py", line 750, in set_basic_selection
indexer = BasicIndexer(selection, self.shape, self.metadata.chunk_grid)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/tom/workspace/zarr-python/src/zarr/indexing.py", line 463, in __init__
dim_indexer = SliceDimIndexer(dim_sel, dim_len, dim_chunk_len)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/tom/workspace/zarr-python/src/zarr/indexing.py", line 287, in __init__
object.__setattr__(self, "nchunks", ceildiv(dim_len, dim_chunk_len))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/tom/workspace/zarr-python/src/zarr/indexing.py", line 100, in ceildiv
return math.ceil(a / b)
~~^~~
ZeroDivisionError: division by zero
```
### Additional output
_No response_ | zarr-developers/zarr-python | diff --git a/src/zarr/testing/strategies.py b/src/zarr/testing/strategies.py
index 2c17fbf7..c82e168c 100644
--- a/src/zarr/testing/strategies.py
+++ b/src/zarr/testing/strategies.py
@@ -65,7 +65,7 @@ paths = st.just("/") | keys
stores = st.builds(MemoryStore, st.just({}), mode=st.just("w"))
compressors = st.sampled_from([None, "default"])
zarr_formats: st.SearchStrategy[Literal[2, 3]] = st.sampled_from([2, 3])
-array_shapes = npst.array_shapes(max_dims=4)
+array_shapes = npst.array_shapes(max_dims=4, min_side=0)
@st.composite # type: ignore[misc]
@@ -85,7 +85,7 @@ def numpy_arrays(
@st.composite # type: ignore[misc]
def np_array_and_chunks(
draw: st.DrawFn, *, arrays: st.SearchStrategy[np.ndarray] = numpy_arrays
-) -> tuple[np.ndarray, tuple[int]]: # type: ignore[type-arg]
+) -> tuple[np.ndarray, tuple[int, ...]]: # type: ignore[type-arg]
"""A hypothesis strategy to generate small sized random arrays.
Returns: a tuple of the array and a suitable random chunking for it.
@@ -93,9 +93,16 @@ def np_array_and_chunks(
array = draw(arrays)
# We want this strategy to shrink towards arrays with smaller number of chunks
# 1. st.integers() shrinks towards smaller values. So we use that to generate number of chunks
- numchunks = draw(st.tuples(*[st.integers(min_value=1, max_value=size) for size in array.shape]))
+ numchunks = draw(
+ st.tuples(
+ *[st.integers(min_value=0 if size == 0 else 1, max_value=size) for size in array.shape]
+ )
+ )
# 2. and now generate the chunks tuple
- chunks = tuple(size // nchunks for size, nchunks in zip(array.shape, numchunks, strict=True))
+ chunks = tuple(
+ size // nchunks if nchunks > 0 else 0
+ for size, nchunks in zip(array.shape, numchunks, strict=True)
+ )
return (array, chunks)
diff --git a/tests/test_array.py b/tests/test_array.py
index ae8e7f99..6451c7fe 100644
--- a/tests/test_array.py
+++ b/tests/test_array.py
@@ -1,3 +1,5 @@
+import json
+import math
import pickle
from itertools import accumulate
from typing import Any, Literal
@@ -9,6 +11,7 @@ import zarr.api.asynchronous
from zarr import Array, AsyncArray, Group
from zarr.codecs import BytesCodec, VLenBytesCodec
from zarr.core.array import chunks_initialized
+from zarr.core.buffer import default_buffer_prototype
from zarr.core.buffer.cpu import NDBuffer
from zarr.core.common import JSON, MemoryOrder, ZarrFormat
from zarr.core.group import AsyncGroup
@@ -624,3 +627,23 @@ def test_array_create_order(
assert vals.flags.f_contiguous
else:
raise AssertionError
+
+
[email protected](
+ ("fill_value", "expected"),
+ [
+ (np.nan * 1j, ["NaN", "NaN"]),
+ (np.nan, ["NaN", 0.0]),
+ (np.inf, ["Infinity", 0.0]),
+ (np.inf * 1j, ["NaN", "Infinity"]),
+ (-np.inf, ["-Infinity", 0.0]),
+ (math.inf, ["Infinity", 0.0]),
+ ],
+)
+async def test_special_complex_fill_values_roundtrip(fill_value: Any, expected: list[Any]) -> None:
+ store = MemoryStore({}, mode="w")
+ Array.create(store=store, shape=(1,), dtype=np.complex64, fill_value=fill_value)
+ content = await store.get("zarr.json", prototype=default_buffer_prototype())
+ assert content is not None
+ actual = json.loads(content.to_bytes())
+ assert actual["fill_value"] == expected
diff --git a/tests/test_indexing.py b/tests/test_indexing.py
index b3a19906..2c51f3da 100644
--- a/tests/test_indexing.py
+++ b/tests/test_indexing.py
@@ -11,6 +11,7 @@ import pytest
from numpy.testing import assert_array_equal
import zarr
+from zarr import Array
from zarr.core.buffer import BufferPrototype, default_buffer_prototype
from zarr.core.indexing import (
BasicSelection,
@@ -31,7 +32,6 @@ from zarr.storage.memory import MemoryStore
if TYPE_CHECKING:
from collections.abc import AsyncGenerator
- from zarr.core.array import Array
from zarr.core.buffer.core import Buffer
from zarr.core.common import ChunkCoords
@@ -1927,3 +1927,11 @@ def test_indexing_with_zarr_array(store: StorePath) -> None:
assert_array_equal(a[ii], za[zii])
assert_array_equal(a[ii], za.oindex[zii])
+
+
[email protected]("store", ["local", "memory"], indirect=["store"])
[email protected]("shape", [(0, 2, 3), (0), (3, 0)])
+def test_zero_sized_chunks(store: StorePath, shape: list[int]) -> None:
+ z = Array.create(store=store, shape=shape, chunk_shape=shape, zarr_format=3, dtype="f8")
+ z[...] = 42
+ assert_array_equal(z[...], np.zeros(shape, dtype="f8"))
diff --git a/tests/test_properties.py b/tests/test_properties.py
index 380a4d85..f70753ce 100644
--- a/tests/test_properties.py
+++ b/tests/test_properties.py
@@ -6,7 +6,7 @@ pytest.importorskip("hypothesis")
import hypothesis.extra.numpy as npst # noqa: E402
import hypothesis.strategies as st # noqa: E402
-from hypothesis import given # noqa: E402
+from hypothesis import assume, given # noqa: E402
from zarr.testing.strategies import arrays, basic_indices, numpy_arrays, zarr_formats # noqa: E402
@@ -35,11 +35,13 @@ def test_basic_indexing(data: st.DataObject) -> None:
@given(data=st.data())
def test_vindex(data: st.DataObject) -> None:
zarray = data.draw(arrays())
+ # integer_array_indices can't handle 0-size dimensions.
+ assume(all(s > 0 for s in zarray.shape))
nparray = zarray[:]
indexer = data.draw(
npst.integer_array_indices(
- shape=nparray.shape, result_shape=npst.array_shapes(max_dims=None)
+ shape=nparray.shape, result_shape=npst.array_shapes(min_side=1, max_dims=None)
)
)
actual = zarray.vindex[indexer]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 3.0 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio",
"moto[s3]",
"mypy",
"hypothesis",
"universal-pathlib"
],
"pre_install": [],
"python": "3.11",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asciitree==0.3.3
attrs==25.3.0
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
coverage==7.8.0
crc32c==2.7.1
cryptography==44.0.2
Deprecated==1.2.18
donfig==0.8.1.post1
fsspec==2025.3.1
hypothesis==6.130.5
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
jmespath==1.0.1
MarkupSafe==3.0.2
moto==5.1.2
mypy==1.15.0
mypy-extensions==1.0.0
numcodecs==0.15.1
numpy==2.2.4
packaging==24.2
pluggy==1.5.0
py-partiql-parser==0.6.1
pycparser==2.22
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
responses==0.25.7
s3transfer==0.11.4
six==1.17.0
sortedcontainers==2.4.0
typing_extensions==4.13.0
universal_pathlib==0.2.6
urllib3==2.3.0
Werkzeug==3.1.3
wrapt==1.17.2
xmltodict==0.14.2
-e git+https://github.com/zarr-developers/zarr-python.git@6ce05265472771e922e69012105d2210e3405aa9#egg=zarr
| name: zarr-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asciitree==0.3.3
- attrs==25.3.0
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- coverage==7.8.0
- crc32c==2.7.1
- cryptography==44.0.2
- deprecated==1.2.18
- donfig==0.8.1.post1
- fsspec==2025.3.1
- hypothesis==6.130.5
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- jmespath==1.0.1
- markupsafe==3.0.2
- moto==5.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- numcodecs==0.15.1
- numpy==2.2.4
- packaging==24.2
- pluggy==1.5.0
- py-partiql-parser==0.6.1
- pycparser==2.22
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- responses==0.25.7
- s3transfer==0.11.4
- six==1.17.0
- sortedcontainers==2.4.0
- typing-extensions==4.13.0
- universal-pathlib==0.2.6
- urllib3==2.3.0
- werkzeug==3.1.3
- wrapt==1.17.2
- xmltodict==0.14.2
- zarr==3.0.0b2.dev11+g6ce05265
prefix: /opt/conda/envs/zarr-python
| [
"tests/test_array.py::test_special_complex_fill_values_roundtrip[(nan+nanj)-expected0]",
"tests/test_array.py::test_special_complex_fill_values_roundtrip[nan-expected1]",
"tests/test_array.py::test_special_complex_fill_values_roundtrip[inf-expected2]",
"tests/test_array.py::test_special_complex_fill_values_roundtrip[(nan+infj)-expected3]",
"tests/test_array.py::test_special_complex_fill_values_roundtrip[-inf-expected4]",
"tests/test_array.py::test_special_complex_fill_values_roundtrip[inf-expected5]",
"tests/test_indexing.py::test_zero_sized_chunks[shape0-local]",
"tests/test_indexing.py::test_zero_sized_chunks[shape0-memory]",
"tests/test_indexing.py::test_zero_sized_chunks[0-local]",
"tests/test_indexing.py::test_zero_sized_chunks[0-memory]",
"tests/test_indexing.py::test_zero_sized_chunks[shape2-local]",
"tests/test_indexing.py::test_zero_sized_chunks[shape2-memory]",
"tests/test_properties.py::test_roundtrip",
"tests/test_properties.py::test_basic_indexing",
"tests/test_properties.py::test_vindex"
] | [] | [
"tests/test_array.py::test_array_creation_existing_node[array-True-2-local]",
"tests/test_array.py::test_array_creation_existing_node[array-True-2-memory]",
"tests/test_array.py::test_array_creation_existing_node[array-True-2-zip]",
"tests/test_array.py::test_array_creation_existing_node[array-True-3-local]",
"tests/test_array.py::test_array_creation_existing_node[array-True-3-memory]",
"tests/test_array.py::test_array_creation_existing_node[array-True-3-zip]",
"tests/test_array.py::test_array_creation_existing_node[array-False-2-local]",
"tests/test_array.py::test_array_creation_existing_node[array-False-2-memory]",
"tests/test_array.py::test_array_creation_existing_node[array-False-2-zip]",
"tests/test_array.py::test_array_creation_existing_node[array-False-3-local]",
"tests/test_array.py::test_array_creation_existing_node[array-False-3-memory]",
"tests/test_array.py::test_array_creation_existing_node[array-False-3-zip]",
"tests/test_array.py::test_array_creation_existing_node[group-True-2-local]",
"tests/test_array.py::test_array_creation_existing_node[group-True-2-memory]",
"tests/test_array.py::test_array_creation_existing_node[group-True-2-zip]",
"tests/test_array.py::test_array_creation_existing_node[group-True-3-local]",
"tests/test_array.py::test_array_creation_existing_node[group-True-3-memory]",
"tests/test_array.py::test_array_creation_existing_node[group-True-3-zip]",
"tests/test_array.py::test_array_creation_existing_node[group-False-2-local]",
"tests/test_array.py::test_array_creation_existing_node[group-False-2-memory]",
"tests/test_array.py::test_array_creation_existing_node[group-False-2-zip]",
"tests/test_array.py::test_array_creation_existing_node[group-False-3-local]",
"tests/test_array.py::test_array_creation_existing_node[group-False-3-memory]",
"tests/test_array.py::test_array_creation_existing_node[group-False-3-zip]",
"tests/test_array.py::test_create_creates_parents[2-local]",
"tests/test_array.py::test_create_creates_parents[2-memory]",
"tests/test_array.py::test_create_creates_parents[2-zip]",
"tests/test_array.py::test_create_creates_parents[3-local]",
"tests/test_array.py::test_create_creates_parents[3-memory]",
"tests/test_array.py::test_create_creates_parents[3-zip]",
"tests/test_array.py::test_array_name_properties_no_group[2-local]",
"tests/test_array.py::test_array_name_properties_no_group[2-memory]",
"tests/test_array.py::test_array_name_properties_no_group[2-zip]",
"tests/test_array.py::test_array_name_properties_no_group[3-local]",
"tests/test_array.py::test_array_name_properties_no_group[3-memory]",
"tests/test_array.py::test_array_name_properties_no_group[3-zip]",
"tests/test_array.py::test_array_name_properties_with_group[2-local]",
"tests/test_array.py::test_array_name_properties_with_group[2-memory]",
"tests/test_array.py::test_array_name_properties_with_group[2-zip]",
"tests/test_array.py::test_array_name_properties_with_group[3-local]",
"tests/test_array.py::test_array_name_properties_with_group[3-memory]",
"tests/test_array.py::test_array_name_properties_with_group[3-zip]",
"tests/test_array.py::test_array_v3_fill_value_default[bool-True-memory]",
"tests/test_array.py::test_array_v3_fill_value_default[bool-False-memory]",
"tests/test_array.py::test_array_v3_fill_value_default[uint8-True-memory]",
"tests/test_array.py::test_array_v3_fill_value_default[uint8-False-memory]",
"tests/test_array.py::test_array_v3_fill_value_default[complex64-True-memory]",
"tests/test_array.py::test_array_v3_fill_value_default[complex64-False-memory]",
"tests/test_array.py::test_array_v3_fill_value[bool-True-memory]",
"tests/test_array.py::test_array_v3_fill_value[uint8-99-memory]",
"tests/test_array.py::test_array_v3_fill_value[float32--99.9-memory]",
"tests/test_array.py::test_array_v3_fill_value[complex64-(3+4j)-memory]",
"tests/test_array.py::test_create_positional_args_deprecated",
"tests/test_array.py::test_selection_positional_args_deprecated",
"tests/test_array.py::test_array_v3_nan_fill_value[memory]",
"tests/test_array.py::test_serializable_async_array[2-local]",
"tests/test_array.py::test_serializable_async_array[3-local]",
"tests/test_array.py::test_serializable_sync_array[2-local]",
"tests/test_array.py::test_serializable_sync_array[3-local]",
"tests/test_array.py::test_storage_transformers[memory]",
"tests/test_array.py::test_nchunks[2-Array]",
"tests/test_array.py::test_nchunks[2-AsyncArray]",
"tests/test_array.py::test_nchunks[5-Array]",
"tests/test_array.py::test_nchunks[5-AsyncArray]",
"tests/test_array.py::test_nchunks[10-Array]",
"tests/test_array.py::test_nchunks[10-AsyncArray]",
"tests/test_array.py::test_nchunks_initialized[Array]",
"tests/test_array.py::test_nchunks_initialized[AsyncArray]",
"tests/test_array.py::test_chunks_initialized[Array]",
"tests/test_array.py::test_chunks_initialized[AsyncArray]",
"tests/test_array.py::test_default_fill_values",
"tests/test_array.py::test_vlen_errors",
"tests/test_array.py::test_update_attrs[2]",
"tests/test_array.py::test_update_attrs[3]",
"tests/test_array.py::test_resize_1d[2-memory]",
"tests/test_array.py::test_resize_1d[3-memory]",
"tests/test_array.py::test_resize_2d[2-memory]",
"tests/test_array.py::test_resize_2d[3-memory]",
"tests/test_array.py::test_append_1d[2-memory]",
"tests/test_array.py::test_append_1d[3-memory]",
"tests/test_array.py::test_append_2d[2-memory]",
"tests/test_array.py::test_append_2d[3-memory]",
"tests/test_array.py::test_append_2d_axis[2-memory]",
"tests/test_array.py::test_append_2d_axis[3-memory]",
"tests/test_array.py::test_append_bad_shape[2-memory]",
"tests/test_array.py::test_append_bad_shape[3-memory]",
"tests/test_array.py::test_array_create_order[memory-2-C]",
"tests/test_array.py::test_array_create_order[memory-2-F]",
"tests/test_array.py::test_array_create_order[memory-2-None]",
"tests/test_array.py::test_array_create_order[memory-3-C]",
"tests/test_array.py::test_array_create_order[memory-3-F]",
"tests/test_array.py::test_array_create_order[memory-3-None]",
"tests/test_indexing.py::test_normalize_integer_selection",
"tests/test_indexing.py::test_replace_ellipsis",
"tests/test_indexing.py::test_get_basic_selection_0d[True-42-uint8]",
"tests/test_indexing.py::test_get_basic_selection_0d[False-42-uint8]",
"tests/test_indexing.py::test_get_basic_selection_1d",
"tests/test_indexing.py::test_get_basic_selection_2d",
"tests/test_indexing.py::test_fancy_indexing_fallback_on_get_setitem",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index3-expected_result3]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index4-expected_result4]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index5-expected_result5]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index6-expected_result6]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index3-expected_result3]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index3-expected_result3]",
"tests/test_indexing.py::test_fancy_indexing_doesnt_mix_with_implicit_slicing",
"tests/test_indexing.py::test_set_basic_selection_0d[42-uint8]",
"tests/test_indexing.py::test_get_orthogonal_selection_1d_bool",
"tests/test_indexing.py::test_get_orthogonal_selection_1d_int",
"tests/test_indexing.py::test_get_orthogonal_selection_2d",
"tests/test_indexing.py::test_get_orthogonal_selection_3d",
"tests/test_indexing.py::test_orthogonal_indexing_edge_cases",
"tests/test_indexing.py::test_set_orthogonal_selection_1d",
"tests/test_indexing.py::test_set_orthogonal_selection_2d",
"tests/test_indexing.py::test_set_orthogonal_selection_3d",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_get_setitem",
"tests/test_indexing.py::test_get_coordinate_selection_1d",
"tests/test_indexing.py::test_get_coordinate_selection_2d",
"tests/test_indexing.py::test_set_coordinate_selection_1d",
"tests/test_indexing.py::test_set_coordinate_selection_2d",
"tests/test_indexing.py::test_get_block_selection_1d",
"tests/test_indexing.py::test_get_block_selection_2d",
"tests/test_indexing.py::test_set_block_selection_1d",
"tests/test_indexing.py::test_set_block_selection_2d",
"tests/test_indexing.py::test_get_mask_selection_1d",
"tests/test_indexing.py::test_get_mask_selection_2d",
"tests/test_indexing.py::test_set_mask_selection_1d",
"tests/test_indexing.py::test_set_mask_selection_2d",
"tests/test_indexing.py::test_get_selection_out",
"tests/test_indexing.py::test_slice_selection_uints",
"tests/test_indexing.py::test_numpy_int_indexing",
"tests/test_indexing.py::test_accessed_chunks[shape0-chunks0-ops0]",
"tests/test_indexing.py::test_accessed_chunks[shape1-chunks1-ops1]",
"tests/test_indexing.py::test_accessed_chunks[shape2-chunks2-ops2]",
"tests/test_indexing.py::test_accessed_chunks[shape3-chunks3-ops3]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection0]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection1]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection2]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection3]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection4]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection5]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection6]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection7]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection8]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection9]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection10]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection11]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection12]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection0]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection1]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection2]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection3]",
"tests/test_indexing.py::test_iter_grid[None-None-1]",
"tests/test_indexing.py::test_iter_grid[None-None-2]",
"tests/test_indexing.py::test_iter_grid[None-None-3]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid_invalid",
"tests/test_indexing.py::test_indexing_with_zarr_array"
] | [] | MIT License | 20,034 | 978 | [
"src/zarr/core/indexing.py",
"src/zarr/core/metadata/v3.py"
] |
statsmodels__statsmodels-9407 | f29be609729882446846a930bec1afbdc78a0d2b | 2024-10-24 08:03:43 | 831e26ca5dfb087658500f08236a30b99b3f4501 | diff --git a/statsmodels/base/data.py b/statsmodels/base/data.py
index 61cada0b6..8cbf5eb6e 100644
--- a/statsmodels/base/data.py
+++ b/statsmodels/base/data.py
@@ -2,6 +2,7 @@
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
+
from __future__ import annotations
from statsmodels.compat.python import lmap
@@ -9,7 +10,7 @@ from statsmodels.compat.python import lmap
from functools import reduce
import numpy as np
-from pandas import DataFrame, Series, isnull, MultiIndex
+from pandas import DataFrame, MultiIndex, Series, isnull
import statsmodels.tools.data as data_util
from statsmodels.tools.decorators import cache_readonly, cache_writable
@@ -26,8 +27,8 @@ def _asarray_2d_null_rows(x):
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
- #Have to have the asarrays because isnull does not account for array_like
- #input
+ # Have to have the asarrays because isnull does not account for array_like
+ # input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
@@ -45,9 +46,11 @@ def _nan_rows(*arrs):
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
- x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
- return np.logical_or(_asarray_2d_null_rows(x),
- (x_is_boolean_array | _asarray_2d_null_rows(y)))
+ x_is_boolean_array = hasattr(x, "dtype") and x.dtype == bool and x
+ return np.logical_or(
+ _asarray_2d_null_rows(x), (x_is_boolean_array | _asarray_2d_null_rows(y))
+ )
+
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
@@ -56,27 +59,26 @@ class ModelData:
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
+
_param_names = None
_cov_names = None
- def __init__(self, endog, exog=None, missing='none', hasconst=None,
- **kwargs):
+ def __init__(self, endog, exog=None, missing="none", hasconst=None, **kwargs):
if data_util._is_recarray(endog) or data_util._is_recarray(exog):
from statsmodels.tools.sm_exceptions import recarray_exception
+
raise NotImplementedError(recarray_exception)
- if 'design_info' in kwargs:
- self.design_info = kwargs.pop('design_info')
- if 'formula' in kwargs:
- self.formula = kwargs.pop('formula')
- if missing != 'none':
- arrays, nan_idx = self.handle_missing(endog, exog, missing,
- **kwargs)
+ if "design_info" in kwargs:
+ self.design_info = kwargs.pop("design_info")
+ if "formula" in kwargs:
+ self.formula = kwargs.pop("formula")
+ if missing != "none":
+ arrays, nan_idx = self.handle_missing(endog, exog, missing, **kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
- self.endog, self.exog = self._convert_endog_exog(self.endog,
- self.exog)
+ self.endog, self.exog = self._convert_endog_exog(self.endog, self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
@@ -91,6 +93,7 @@ class ModelData:
def __getstate__(self):
from copy import copy
+
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
@@ -100,20 +103,22 @@ class ModelData:
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
- from patsy import dmatrices, PatsyError
+ from patsy import PatsyError, dmatrices
+
exc = []
try:
- data = d['frame']
+ data = d["frame"]
except KeyError:
- data = d['orig_endog'].join(d['orig_exog'])
+ data = d["orig_endog"].join(d["orig_exog"])
for depth in [2, 3, 1, 0, 4]: # sequence is a guess where to likely find it
try:
- _, design = dmatrices(d['formula'], data, eval_env=depth,
- return_type='dataframe')
+ _, design = dmatrices(
+ d["formula"], data, eval_env=depth, return_type="dataframe"
+ )
break
except (NameError, PatsyError) as e:
- exc.append(e) # why do I need a reference from outside except block
+ exc.append(e) # why do I need a reference from outside except block
pass
else:
raise exc[-1]
@@ -131,7 +136,7 @@ class ModelData:
check_implicit = False
exog_max = np.max(self.exog, axis=0)
if not np.isfinite(exog_max).all():
- raise MissingDataError('exog contains inf or nans')
+ raise MissingDataError("exog contains inf or nans")
exog_min = np.min(self.exog, axis=0)
const_idx = np.where(exog_max == exog_min)[0].squeeze()
self.k_constant = const_idx.size
@@ -155,7 +160,7 @@ class ModelData:
values.append(value)
else:
# we did not break, no column of ones
- pos = (np.array(values) != 0)
+ pos = np.array(values) != 0
if pos.any():
# take the first nonzero column
self.k_constant = 1
@@ -173,7 +178,8 @@ class ModelData:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
- (np.ones(self.exog.shape[0]), self.exog))
+ (np.ones(self.exog.shape[0]), self.exog)
+ )
rank_augm = np.linalg.matrix_rank(augmented_exog)
rank_orig = np.linalg.matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
@@ -200,21 +206,21 @@ class ModelData:
none_array_names = []
# patsy's already dropped NaNs in y/X
- missing_idx = kwargs.pop('missing_idx', None)
+ missing_idx = kwargs.pop("missing_idx", None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
- none_array_names += ['exog']
+ none_array_names += ["exog"]
elif exog is not None:
combined = (endog, exog)
- combined_names = ['endog', 'exog']
+ combined_names = ["endog", "exog"]
else:
combined = (endog,)
- combined_names = ['endog']
- none_array_names += ['exog']
+ combined_names = ["endog"]
+ none_array_names += ["exog"]
# deal with other arrays
combined_2d = ()
@@ -237,8 +243,9 @@ class ModelData:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
- raise ValueError("Arrays with more than 2 dimensions "
- "are not yet handled")
+ raise ValueError(
+ "Arrays with more than 2 dimensions " "are not yet handled"
+ )
if missing_idx is not None:
nan_mask = missing_idx
@@ -246,16 +253,20 @@ class ModelData:
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
- raise ValueError("Shape mismatch between endog/exog "
- "and extra arrays given to model.")
+ raise ValueError(
+ "Shape mismatch between endog/exog "
+ "and extra arrays given to model."
+ )
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
- raise ValueError("Shape mismatch between endog/exog "
- "and extra 2d arrays given to model.")
+ raise ValueError(
+ "Shape mismatch between endog/exog "
+ "and extra 2d arrays given to model."
+ )
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
@@ -272,20 +283,19 @@ class ModelData:
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
- combined.update({k: kwargs.get(k, None)
- for k in none_array_names})
+ combined.update({k: kwargs.get(k, None) for k in none_array_names})
if missing_idx is not None:
- combined.update({'endog': endog})
+ combined.update({"endog": endog})
if exog is not None:
- combined.update({'exog': exog})
+ combined.update({"exog": exog})
return combined, []
- elif missing == 'raise':
+ elif missing == "raise":
raise MissingDataError("NaNs were encountered in the data")
- elif missing == 'drop':
+ elif missing == "drop":
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
@@ -299,16 +309,16 @@ class ModelData:
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
- combined.update({'endog': endog})
+ combined.update({"endog": endog})
if exog is not None:
- combined.update({'exog': exog})
+ combined.update({"exog": exog})
if combined_2d:
- combined.update(dict(zip(combined_2d_names,
- lmap(drop_nans_2d, combined_2d))))
+ combined.update(
+ dict(zip(combined_2d_names, lmap(drop_nans_2d, combined_2d)))
+ )
if none_array_names:
- combined.update({k: kwargs.get(k, None)
- for k in none_array_names})
+ combined.update({k: kwargs.get(k, None) for k in none_array_names})
return combined, np.where(~nan_mask)[0].tolist()
else:
@@ -396,8 +406,7 @@ class ModelData:
if isinstance(arr, DataFrame):
if isinstance(arr.columns, MultiIndex):
# Flatten MultiIndexes into "simple" column names
- return ['_'.join(level for level in c if level)
- for c in arr.columns]
+ return ["_".join(level for level in c if level) for c in arr.columns]
else:
return list(arr.columns)
elif isinstance(arr, Series):
@@ -435,26 +444,26 @@ class ModelData:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
- def wrap_output(self, obj, how='columns', names=None):
- if how == 'columns':
+ def wrap_output(self, obj, how="columns", names=None):
+ if how == "columns":
return self.attach_columns(obj)
- elif how == 'rows':
+ elif how == "rows":
return self.attach_rows(obj)
- elif how == 'cov':
+ elif how == "cov":
return self.attach_cov(obj)
- elif how == 'dates':
+ elif how == "dates":
return self.attach_dates(obj)
- elif how == 'columns_eq':
+ elif how == "columns_eq":
return self.attach_columns_eq(obj)
- elif how == 'cov_eq':
+ elif how == "cov_eq":
return self.attach_cov_eq(obj)
- elif how == 'generic_columns':
+ elif how == "generic_columns":
return self.attach_generic_columns(obj, names)
- elif how == 'generic_columns_2d':
+ elif how == "generic_columns_2d":
return self.attach_generic_columns_2d(obj, names)
- elif how == 'ynames':
+ elif how == "ynames":
return self.attach_ynames(obj)
- elif how == 'multivariate_confint':
+ elif how == "multivariate_confint":
return self.attach_mv_confint(obj)
else:
return obj
@@ -502,12 +511,14 @@ class PandasData(ModelData):
"""
def _convert_endog_exog(self, endog, exog=None):
- #TODO: remove this when we handle dtype systematically
+ # TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
- exog = exog if exog is None else np.asarray(exog)
- if endog.dtype == object or exog is not None and exog.dtype == object:
- raise ValueError("Pandas data cast to numpy dtype of object. "
- "Check input data with np.asarray(data).")
+ exog = exog if exog is None else np.asarray(exog, dtype=float)
+ if endog.dtype == object:
+ raise ValueError(
+ "Pandas data cast to numpy dtype of object. "
+ "Check input data with np.asarray(data)."
+ )
return super()._convert_endog_exog(endog, exog)
@classmethod
@@ -527,9 +538,11 @@ class PandasData(ModelData):
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
- if (exog is not None and
- (hasattr(endog, 'index') and hasattr(exog, 'index')) and
- not self.orig_endog.index.equals(self.orig_exog.index)):
+ if (
+ exog is not None
+ and (hasattr(endog, "index") and hasattr(exog, "index"))
+ and not self.orig_endog.index.equals(self.orig_exog.index)
+ ):
raise ValueError("The indices for endog and exog are not aligned")
super()._check_integrity()
@@ -583,7 +596,7 @@ class PandasData(ModelData):
else:
out = DataFrame(result)
out.columns = self.ynames
- out.index = self.row_labels[-len(result):]
+ out.index = self.row_labels[-len(result) :]
return out
def attach_dates(self, result):
@@ -595,14 +608,14 @@ class PandasData(ModelData):
if squeezed.ndim < 2:
return Series(squeezed, index=self.predict_dates)
else:
- return DataFrame(np.asarray(result),
- index=self.predict_dates,
- columns=self.ynames)
+ return DataFrame(
+ np.asarray(result), index=self.predict_dates, columns=self.ynames
+ )
def attach_mv_confint(self, result):
- return DataFrame(result.reshape((-1, 2)),
- index=self.cov_names,
- columns=['lower', 'upper'])
+ return DataFrame(
+ result.reshape((-1, 2)), index=self.cov_names, columns=["lower", "upper"]
+ )
def attach_ynames(self, result):
squeezed = result.squeeze()
@@ -615,9 +628,9 @@ class PandasData(ModelData):
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
- ynames = ['y']
+ ynames = ["y"]
else: # for VAR
- ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
+ ynames = ["y%d" % (i + 1) for i in range(endog.shape[1])]
return ynames
@@ -628,17 +641,17 @@ def _make_exog_names(exog):
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
- exog_names = ['x%d' % i for i in range(1, exog.shape[1])]
- exog_names.insert(const_idx, 'const')
+ exog_names = ["x%d" % i for i in range(1, exog.shape[1])]
+ exog_names.insert(const_idx, "const")
else:
- exog_names = ['x%d' % i for i in range(1, exog.shape[1]+1)]
+ exog_names = ["x%d" % i for i in range(1, exog.shape[1] + 1)]
return exog_names
-def handle_missing(endog, exog=None, missing='none', **kwargs):
+def handle_missing(endog, exog=None, missing="none", **kwargs):
klass = handle_data_class_factory(endog, exog)
- if missing == 'none':
+ if missing == "none":
ret_dict = dict(endog=endog, exog=exog)
ret_dict.update(kwargs)
return ret_dict, None
@@ -659,12 +672,13 @@ def handle_data_class_factory(endog, exog):
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
- raise ValueError('unrecognized data structures: %s / %s' %
- (type(endog), type(exog)))
+ raise ValueError(
+ "unrecognized data structures: %s / %s" % (type(endog), type(exog))
+ )
return klass
-def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
+def handle_data(endog, exog, missing="none", hasconst=None, **kwargs):
# deal with lists and tuples up-front
if isinstance(endog, (list, tuple)):
endog = np.asarray(endog)
@@ -672,5 +686,4 @@ def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
exog = np.asarray(exog)
klass = handle_data_class_factory(endog, exog)
- return klass(endog, exog=exog, missing=missing, hasconst=hasconst,
- **kwargs)
+ return klass(endog, exog=exog, missing=missing, hasconst=hasconst, **kwargs)
| Unreliable auto-casting of pandas data in model fitters
#### Describe the bug
As of now `np.asarray` with no flags is used to convert model input into a proper numerical format, when fitting models such as OLS, WLS etc.
https://github.com/statsmodels/statsmodels/blob/a0eca865c65ef9336c6403f8ff4bc29a1d3ec26b/statsmodels/base/data.py#L504-L511
This is non-reliable, particularly for mixed data types, as shown below:
#### Code Sample, a copy-pastable example if possible
```python
import pandas as pd
import numpy as np
import statsmodels.api as sm
Xy = pd.DataFrame(data=[[0.0,False,0.1],[1.0,True,0.9],[1.0,True,0.9]],columns=['y','x1','x2'])
X = Xy[['x1','x2']] # FIXME: + .astype(float), otherwise type casting fails
y = Xy['y']
sm.OLS(y,X)
```
<details>
**Note**: As you can see, there are many issues on our GitHub tracker, so it is very possible that your issue has been posted before. Please check first before submitting so that we do not have to handle and close duplicates.
**Note**: Please be sure you are using the latest released version of `statsmodels`, or a recent build of `main`. If your problem has been fixed in an unreleased version, you might be able to use `main` until a new release occurs.
**Note**: If you are using a released version, have you verified that the bug exists in the main branch of this repository? It helps the limited resources if we know problems exist in the current main branch so that they do not need to check whether the code sample produces a bug in the next release.
</details>
#### Related Issues
Here https://github.com/statsmodels/statsmodels/issues/8794 opens a narrower discussion about non-reliability of handling nulls.
#### Suggested solution
I **suggest to inform numpy about numerical types** when casting
```python
np.asarray(Xy, dtype=np.float32)
```
#### Expected Output
Fitting does not produce errors.
#### Output of ``import statsmodels.api as sm; sm.show_versions()``
<details>
[paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line]
INSTALLED VERSIONS
------------------
Python: 3.10.12.final.0
OS: Linux 6.1.58+ #1 SMP PREEMPT_DYNAMIC Sat Nov 18 15:31:17 UTC 2023 x86_64
byteorder: little
LC_ALL: en_US.UTF-8
LANG: en_US.UTF-8
statsmodels
===========
Installed: 0.14.1 (/usr/local/lib/python3.10/dist-packages/statsmodels)
Required Dependencies
=====================
cython: 3.0.10 (/usr/local/lib/python3.10/dist-packages/Cython)
numpy: 1.25.2 (/usr/local/lib/python3.10/dist-packages/numpy)
scipy: 1.11.4 (/usr/local/lib/python3.10/dist-packages/scipy)
pandas: 2.0.3 (/usr/local/lib/python3.10/dist-packages/pandas)
dateutil: 2.8.2 (/usr/local/lib/python3.10/dist-packages/dateutil)
patsy: 0.5.6 (/usr/local/lib/python3.10/dist-packages/patsy)
Optional Dependencies
=====================
matplotlib: 3.7.1 (/usr/local/lib/python3.10/dist-packages/matplotlib)
backend: module://matplotlib_inline.backend_inline
cvxopt: 1.3.2 (/usr/local/lib/python3.10/dist-packages/cvxopt)
joblib: 1.4.0 (/usr/local/lib/python3.10/dist-packages/joblib)
Developer Tools
================
IPython: 7.34.0 (/usr/local/lib/python3.10/dist-packages/IPython)
jinja2: 3.1.3 (/usr/local/lib/python3.10/dist-packages/jinja2)
sphinx: 5.0.2 (/usr/local/lib/python3.10/dist-packages/sphinx)
pygments: 2.16.1 (/usr/local/lib/python3.10/dist-packages/pygments)
pytest: 7.4.4 (/usr/local/lib/python3.10/dist-packages/pytest)
virtualenv: Not installed
</details>
| statsmodels/statsmodels | diff --git a/statsmodels/base/tests/test_data.py b/statsmodels/base/tests/test_data.py
index 870a971f4..ea04a297a 100644
--- a/statsmodels/base/tests/test_data.py
+++ b/statsmodels/base/tests/test_data.py
@@ -1,21 +1,20 @@
from statsmodels.compat.pandas import (
- assert_series_equal,
assert_frame_equal,
+ assert_series_equal,
make_dataframe,
)
import numpy as np
-from numpy.testing import assert_equal, assert_, assert_raises
+from numpy.testing import assert_, assert_equal, assert_raises
import pandas as pd
import pytest
from statsmodels.base import data as sm_data
+from statsmodels.discrete.discrete_model import Logit
from statsmodels.formula import handle_formula_data
-from statsmodels.regression.linear_model import OLS
-from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
-from statsmodels.discrete.discrete_model import Logit
-
+from statsmodels.genmod.generalized_linear_model import GLM
+from statsmodels.regression.linear_model import OLS
# FIXME: do not leave commented-out, enable or move/remove
# class TestDates:
@@ -28,6 +27,7 @@ from statsmodels.discrete.discrete_model import Logit
# np.testing.assert_equal(data.wrap_output(self.dates_input, 'dates'),
# self.dates_result)
+
class TestArrays:
@classmethod
def setup_class(cls):
@@ -39,8 +39,8 @@ class TestArrays:
cls.col_result = cls.col_input = np.random.random(nvars)
cls.row_result = cls.row_input = np.random.random(nrows)
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
- cls.xnames = ['const', 'x1', 'x2']
- cls.ynames = 'y'
+ cls.xnames = ["const", "x1", "x2"]
+ cls.ynames = "y"
cls.row_labels = None
def test_orig(self):
@@ -55,12 +55,15 @@ class TestArrays:
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
- np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
- self.col_result)
- np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
- self.row_result)
- np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
- self.cov_result)
+ np.testing.assert_equal(
+ data.wrap_output(self.col_input, "columns"), self.col_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.row_input, "rows"), self.row_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.cov_input, "cov"), self.cov_result
+ )
def test_names(self):
data = self.data
@@ -95,8 +98,8 @@ class TestArrays1dExog(TestArrays):
exog = np.random.random(10)
cls.data = sm_data.handle_data(cls.endog, exog)
cls.exog = exog[:, None]
- cls.xnames = ['x1']
- cls.ynames = 'y'
+ cls.xnames = ["x1"]
+ cls.ynames = "y"
def test_orig(self):
np.testing.assert_equal(self.data.orig_endog, self.endog)
@@ -106,26 +109,23 @@ class TestArrays1dExog(TestArrays):
class TestDataFrames(TestArrays):
@classmethod
def setup_class(cls):
- cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ cls.endog = pd.DataFrame(np.random.random(10), columns=["y_1"])
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = "y_1"
cls.row_labels = cls.exog.index
def test_orig(self):
@@ -140,22 +140,21 @@ class TestDataFrames(TestArrays):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
- assert_series_equal(data.wrap_output(self.col_input, 'columns'),
- self.col_result)
- assert_series_equal(data.wrap_output(self.row_input, 'rows'),
- self.row_result)
- assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
- self.cov_result)
+ assert_series_equal(
+ data.wrap_output(self.col_input, "columns"), self.col_result
+ )
+ assert_series_equal(data.wrap_output(self.row_input, "rows"), self.row_result)
+ assert_frame_equal(data.wrap_output(self.cov_input, "cov"), self.cov_result)
class TestDataFramesWithMultiIndex(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
- mi = pd.MultiIndex.from_product([['x'], ['1', '2']])
+ cls.endog = pd.DataFrame(np.random.random(10), columns=["y_1"])
+ mi = pd.MultiIndex.from_product([["x"], ["1", "2"]])
exog = pd.DataFrame(np.random.random((10, 2)), columns=mi)
- exog_flattened_idx = pd.Index(['const', 'x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ exog_flattened_idx = pd.Index(["const", "x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
@@ -165,11 +164,11 @@ class TestDataFramesWithMultiIndex(TestDataFrames):
cls.row_input = np.random.random(nrows)
cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog_flattened_idx,
- columns=exog_flattened_idx)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog_flattened_idx, columns=exog_flattened_idx
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = "y_1"
cls.row_labels = cls.exog.index
@@ -187,25 +186,22 @@ class TestListDataFrame(TestDataFrames):
def setup_class(cls):
cls.endog = np.random.random(10).tolist()
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = 'y'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = "y"
cls.row_labels = cls.exog.index
def test_endogexog(self):
@@ -220,27 +216,24 @@ class TestListDataFrame(TestDataFrames):
class TestDataFrameList(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
+ cls.endog = pd.DataFrame(np.random.random(10), columns=["y_1"])
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x1', 'x2'])
- exog.insert(0, 'const', 1)
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x1", "x2"])
+ exog.insert(0, "const", 1)
cls.exog = exog.values.tolist()
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x1', 'x2']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x1", "x2"]
+ cls.ynames = "y_1"
cls.row_labels = cls.endog.index
def test_endogexog(self):
@@ -257,25 +250,22 @@ class TestArrayDataFrame(TestDataFrames):
def setup_class(cls):
cls.endog = np.random.random(10)
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = 'y'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = "y"
cls.row_labels = cls.exog.index
def test_endogexog(self):
@@ -290,27 +280,26 @@ class TestArrayDataFrame(TestDataFrames):
class TestDataFrameArray(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = pd.DataFrame(np.random.random(10), columns=['y_1'])
+ cls.endog = pd.DataFrame(np.random.random(10), columns=["y_1"])
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x1', 'x2']) # names mimic defaults
- exog.insert(0, 'const', 1)
+ exog = pd.DataFrame(
+ np.random.random((10, 2)), columns=["x1", "x2"]
+ ) # names mimic defaults
+ exog.insert(0, "const", 1)
cls.exog = exog.values
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x1', 'x2']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x1", "x2"]
+ cls.ynames = "y_1"
cls.row_labels = cls.endog.index
def test_endogexog(self):
@@ -325,27 +314,24 @@ class TestDataFrameArray(TestDataFrames):
class TestSeriesDataFrame(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = pd.Series(np.random.random(10), name='y_1')
+ cls.endog = pd.Series(np.random.random(10), name="y_1")
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = "y_1"
cls.row_labels = cls.exog.index
def test_orig(self):
@@ -356,25 +342,23 @@ class TestSeriesDataFrame(TestDataFrames):
class TestSeriesSeries(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = pd.Series(np.random.random(10), name='y_1')
+ cls.endog = pd.Series(np.random.random(10), name="y_1")
- exog = pd.Series(np.random.random(10), name='x_1')
+ exog = pd.Series(np.random.random(10), name="x_1")
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 1
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=[exog.name])
+ cls.col_result = pd.Series(cls.col_input, index=[exog.name])
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=[exog.name],
- columns=[exog.name])
- cls.xnames = ['x_1']
- cls.ynames = 'y_1'
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=[exog.name], columns=[exog.name]
+ )
+ cls.xnames = ["x_1"]
+ cls.ynames = "y_1"
cls.row_labels = cls.exog.index
def test_orig(self):
@@ -392,14 +376,14 @@ def test_alignment():
d = load_pandas().data
# growth rates
- gs_l_realinv = 400 * np.log(d['realinv']).diff().dropna()
- gs_l_realgdp = 400 * np.log(d['realgdp']).diff().dropna()
- lint = d['realint'][:-1] # incorrect indexing for test purposes
+ gs_l_realinv = 400 * np.log(d["realinv"]).diff().dropna()
+ gs_l_realgdp = 400 * np.log(d["realgdp"]).diff().dropna()
+ lint = d["realint"][:-1] # incorrect indexing for test purposes
endog = gs_l_realinv
# re-index because they will not conform to lint
- realgdp = gs_l_realgdp.reindex(lint.index, method='bfill')
+ realgdp = gs_l_realgdp.reindex(lint.index, method="bfill")
data = dict(const=np.ones_like(lint), lrealgdp=realgdp, lint=lint)
exog = pd.DataFrame(data)
@@ -421,74 +405,77 @@ class TestMultipleEqsArrays(TestArrays):
cls.cov_result = cls.cov_input = np.random.random((nvars, nvars))
cls.cov_eq_result = cls.cov_eq_input = np.random.random((neqs, neqs))
cls.col_eq_result = cls.col_eq_input = np.array((neqs, nvars))
- cls.xnames = ['const', 'x1', 'x2']
- cls.ynames = ['y1', 'y2', 'y3', 'y4']
+ cls.xnames = ["const", "x1", "x2"]
+ cls.ynames = ["y1", "y2", "y3", "y4"]
cls.row_labels = None
def test_attach(self):
data = self.data
# this makes sure what the wrappers need work but not the wrapped
# results themselves
- np.testing.assert_equal(data.wrap_output(self.col_input, 'columns'),
- self.col_result)
- np.testing.assert_equal(data.wrap_output(self.row_input, 'rows'),
- self.row_result)
- np.testing.assert_equal(data.wrap_output(self.cov_input, 'cov'),
- self.cov_result)
- np.testing.assert_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
- self.cov_eq_result)
- np.testing.assert_equal(data.wrap_output(self.col_eq_input,
- 'columns_eq'),
- self.col_eq_result)
+ np.testing.assert_equal(
+ data.wrap_output(self.col_input, "columns"), self.col_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.row_input, "rows"), self.row_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.cov_input, "cov"), self.cov_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.cov_eq_input, "cov_eq"), self.cov_eq_result
+ )
+ np.testing.assert_equal(
+ data.wrap_output(self.col_eq_input, "columns_eq"), self.col_eq_result
+ )
class TestMultipleEqsDataFrames(TestDataFrames):
@classmethod
def setup_class(cls):
- cls.endog = endog = pd.DataFrame(np.random.random((10, 4)),
- columns=['y_1', 'y_2', 'y_3', 'y_4'])
- exog = pd.DataFrame(np.random.random((10, 2)),
- columns=['x_1', 'x_2'])
- exog.insert(0, 'const', 1)
+ cls.endog = endog = pd.DataFrame(
+ np.random.random((10, 4)), columns=["y_1", "y_2", "y_3", "y_4"]
+ )
+ exog = pd.DataFrame(np.random.random((10, 2)), columns=["x_1", "x_2"])
+ exog.insert(0, "const", 1)
cls.exog = exog
cls.data = sm_data.handle_data(cls.endog, cls.exog)
nrows = 10
nvars = 3
neqs = 4
cls.col_input = np.random.random(nvars)
- cls.col_result = pd.Series(cls.col_input,
- index=exog.columns)
+ cls.col_result = pd.Series(cls.col_input, index=exog.columns)
cls.row_input = np.random.random(nrows)
- cls.row_result = pd.Series(cls.row_input,
- index=exog.index)
+ cls.row_result = pd.Series(cls.row_input, index=exog.index)
cls.cov_input = np.random.random((nvars, nvars))
- cls.cov_result = pd.DataFrame(cls.cov_input,
- index=exog.columns,
- columns=exog.columns)
+ cls.cov_result = pd.DataFrame(
+ cls.cov_input, index=exog.columns, columns=exog.columns
+ )
cls.cov_eq_input = np.random.random((neqs, neqs))
- cls.cov_eq_result = pd.DataFrame(cls.cov_eq_input,
- index=endog.columns,
- columns=endog.columns)
+ cls.cov_eq_result = pd.DataFrame(
+ cls.cov_eq_input, index=endog.columns, columns=endog.columns
+ )
cls.col_eq_input = np.random.random((nvars, neqs))
- cls.col_eq_result = pd.DataFrame(cls.col_eq_input,
- index=exog.columns,
- columns=endog.columns)
- cls.xnames = ['const', 'x_1', 'x_2']
- cls.ynames = ['y_1', 'y_2', 'y_3', 'y_4']
+ cls.col_eq_result = pd.DataFrame(
+ cls.col_eq_input, index=exog.columns, columns=endog.columns
+ )
+ cls.xnames = ["const", "x_1", "x_2"]
+ cls.ynames = ["y_1", "y_2", "y_3", "y_4"]
cls.row_labels = cls.exog.index
def test_attach(self):
data = self.data
- assert_series_equal(data.wrap_output(self.col_input, 'columns'),
- self.col_result)
- assert_series_equal(data.wrap_output(self.row_input, 'rows'),
- self.row_result)
- assert_frame_equal(data.wrap_output(self.cov_input, 'cov'),
- self.cov_result)
- assert_frame_equal(data.wrap_output(self.cov_eq_input, 'cov_eq'),
- self.cov_eq_result)
- assert_frame_equal(data.wrap_output(self.col_eq_input, 'columns_eq'),
- self.col_eq_result)
+ assert_series_equal(
+ data.wrap_output(self.col_input, "columns"), self.col_result
+ )
+ assert_series_equal(data.wrap_output(self.row_input, "rows"), self.row_result)
+ assert_frame_equal(data.wrap_output(self.cov_input, "cov"), self.cov_result)
+ assert_frame_equal(
+ data.wrap_output(self.cov_eq_input, "cov_eq"), self.cov_eq_result
+ )
+ assert_frame_equal(
+ data.wrap_output(self.col_eq_input, "columns_eq"), self.col_eq_result
+ )
class TestMissingArray:
@@ -504,13 +491,12 @@ class TestMissingArray:
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
- sm_data.handle_data(np.random.random(20), np.random.random((20, 2)),
- 'raise')
+ sm_data.handle_data(np.random.random(20), np.random.random((20, 2)), "raise")
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
- sm_data.handle_data(self.y, self.X, 'raise')
+ sm_data.handle_data(self.y, self.X, "raise")
def test_drop(self):
y = self.y
@@ -519,12 +505,12 @@ class TestMissingArray:
idx = ~np.isnan(combined).any(axis=1)
y = y[idx]
X = X[idx]
- data = sm_data.handle_data(self.y, self.X, 'drop')
+ data = sm_data.handle_data(self.y, self.X, "drop")
np.testing.assert_array_equal(data.endog, y)
np.testing.assert_array_equal(data.exog, X)
def test_none(self):
- data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
+ data = sm_data.handle_data(self.y, self.X, "none", hasconst=False)
np.testing.assert_array_equal(data.endog, self.y)
np.testing.assert_array_equal(data.exog, self.X)
assert data.k_constant == 0
@@ -532,31 +518,31 @@ class TestMissingArray:
def test_endog_only_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
- sm_data.handle_data(self.y, None, 'raise')
+ sm_data.handle_data(self.y, None, "raise")
def test_endog_only_drop(self):
y = self.y
y = y[~np.isnan(y)]
- data = sm_data.handle_data(self.y, None, 'drop')
+ data = sm_data.handle_data(self.y, None, "drop")
np.testing.assert_array_equal(data.endog, y)
def test_mv_endog(self):
y = self.X
y = y[~np.isnan(y).any(axis=1)]
- data = sm_data.handle_data(self.X, None, 'drop')
+ data = sm_data.handle_data(self.X, None, "drop")
np.testing.assert_array_equal(data.endog, y)
def test_extra_kwargs_2d(self):
sigma = np.random.random((25, 25))
sigma = sigma + sigma.T - np.diag(np.diag(sigma))
- data = sm_data.handle_data(self.y, self.X, 'drop', sigma=sigma)
+ data = sm_data.handle_data(self.y, self.X, "drop", sigma=sigma)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
sigma = sigma[idx][:, idx]
np.testing.assert_array_equal(data.sigma, sigma)
def test_extra_kwargs_1d(self):
weights = np.random.random(25)
- data = sm_data.handle_data(self.y, self.X, 'drop', weights=weights)
+ data = sm_data.handle_data(self.y, self.X, "drop", weights=weights)
idx = ~np.isnan(np.c_[self.y, self.X]).any(axis=1)
weights = weights[idx]
np.testing.assert_array_equal(data.weights, weights)
@@ -576,14 +562,16 @@ class TestMissingPandas:
@pytest.mark.smoke
def test_raise_no_missing(self):
# GH#1700
- sm_data.handle_data(pd.Series(np.random.random(20)),
- pd.DataFrame(np.random.random((20, 2))),
- 'raise')
+ sm_data.handle_data(
+ pd.Series(np.random.random(20)),
+ pd.DataFrame(np.random.random((20, 2))),
+ "raise",
+ )
def test_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
- sm_data.handle_data(self.y, self.X, 'raise')
+ sm_data.handle_data(self.y, self.X, "raise")
def test_drop(self):
y = self.y
@@ -592,14 +580,14 @@ class TestMissingPandas:
idx = ~np.isnan(combined).any(axis=1)
y = y.loc[idx]
X = X.loc[idx]
- data = sm_data.handle_data(self.y, self.X, 'drop')
+ data = sm_data.handle_data(self.y, self.X, "drop")
np.testing.assert_array_equal(data.endog, y.values)
assert_series_equal(data.orig_endog, self.y.loc[idx])
np.testing.assert_array_equal(data.exog, X.values)
assert_frame_equal(data.orig_exog, self.X.loc[idx])
def test_none(self):
- data = sm_data.handle_data(self.y, self.X, 'none', hasconst=False)
+ data = sm_data.handle_data(self.y, self.X, "none", hasconst=False)
np.testing.assert_array_equal(data.endog, self.y.values)
np.testing.assert_array_equal(data.exog, self.X.values)
assert data.k_constant == 0
@@ -607,24 +595,48 @@ class TestMissingPandas:
def test_endog_only_raise(self):
with pytest.raises(Exception):
# TODO: be more specific about exception
- sm_data.handle_data(self.y, None, 'raise')
+ sm_data.handle_data(self.y, None, "raise")
def test_endog_only_drop(self):
y = self.y
y = y.dropna()
- data = sm_data.handle_data(self.y, None, 'drop')
+ data = sm_data.handle_data(self.y, None, "drop")
np.testing.assert_array_equal(data.endog, y.values)
def test_mv_endog(self):
y = self.X
y = y.loc[~np.isnan(y.values).any(axis=1)]
- data = sm_data.handle_data(self.X, None, 'drop')
+ data = sm_data.handle_data(self.X, None, "drop")
np.testing.assert_array_equal(data.endog, y.values)
def test_labels(self):
- labels = pd.Index([0, 1, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15,
- 16, 17, 18, 19, 20, 21, 22, 23, 24])
- data = sm_data.handle_data(self.y, self.X, 'drop')
+ labels = pd.Index(
+ [
+ 0,
+ 1,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 11,
+ 12,
+ 13,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ ]
+ )
+ data = sm_data.handle_data(self.y, self.X, "drop")
np.testing.assert_(data.row_labels.equals(labels))
@@ -632,18 +644,19 @@ class TestConstant:
@classmethod
def setup_class(cls):
from statsmodels.datasets.longley import load_pandas
+
cls.data = load_pandas()
def test_array_constant(self):
exog = self.data.exog.copy()
- exog['const'] = 1
+ exog["const"] = 1
data = sm_data.handle_data(self.data.endog.values, exog.values)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
def test_pandas_constant(self):
exog = self.data.exog.copy()
- exog['const'] = 1
+ exog["const"] = 1
data = sm_data.handle_data(self.data.endog, exog)
np.testing.assert_equal(data.k_constant, 1)
np.testing.assert_equal(data.const_idx, 6)
@@ -668,57 +681,57 @@ class TestHandleMissing:
df = make_dataframe()
df.iloc[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
- data, _ = sm_data.handle_missing(y, X, missing='drop')
+ data, _ = sm_data.handle_missing(y, X, missing="drop")
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
- assert_frame_equal(data['exog'], X_exp)
- assert_series_equal(data['endog'], y_exp)
+ assert_frame_equal(data["exog"], X_exp)
+ assert_series_equal(data["endog"], y_exp)
def test_arrays(self):
arr = np.random.randn(20, 4)
arr[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = arr[:, 0], arr[:, 1:]
- data, _ = sm_data.handle_missing(y, X, missing='drop')
+ data, _ = sm_data.handle_missing(y, X, missing="drop")
bools_mask = np.ones(20, dtype=bool)
bools_mask[[2, 5, 10]] = False
y_exp = arr[bools_mask, 0]
X_exp = arr[bools_mask, 1:]
- np.testing.assert_array_equal(data['endog'], y_exp)
- np.testing.assert_array_equal(data['exog'], X_exp)
+ np.testing.assert_array_equal(data["endog"], y_exp)
+ np.testing.assert_array_equal(data["exog"], X_exp)
def test_pandas_array(self):
df = make_dataframe()
df.iloc[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]].values
- data, _ = sm_data.handle_missing(y, X, missing='drop')
+ data, _ = sm_data.handle_missing(y, X, missing="drop")
df = df.dropna()
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]].values
- np.testing.assert_array_equal(data['exog'], X_exp)
- assert_series_equal(data['endog'], y_exp)
+ np.testing.assert_array_equal(data["exog"], X_exp)
+ assert_series_equal(data["endog"], y_exp)
def test_array_pandas(self):
df = make_dataframe()
df.iloc[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]].values, df[df.columns[1:]]
- data, _ = sm_data.handle_missing(y, X, missing='drop')
+ data, _ = sm_data.handle_missing(y, X, missing="drop")
df = df.dropna()
y_exp, X_exp = df[df.columns[0]].values, df[df.columns[1:]]
- assert_frame_equal(data['exog'], X_exp)
- np.testing.assert_array_equal(data['endog'], y_exp)
+ assert_frame_equal(data["exog"], X_exp)
+ np.testing.assert_array_equal(data["endog"], y_exp)
def test_noop(self):
df = make_dataframe()
df.iloc[[2, 5, 10], [2, 3, 1]] = np.nan
y, X = df[df.columns[0]], df[df.columns[1:]]
- data, _ = sm_data.handle_missing(y, X, missing='none')
+ data, _ = sm_data.handle_missing(y, X, missing="none")
y_exp, X_exp = df[df.columns[0]], df[df.columns[1:]]
- assert_frame_equal(data['exog'], X_exp)
- assert_series_equal(data['endog'], y_exp)
+ assert_frame_equal(data["exog"], X_exp)
+ assert_series_equal(data["endog"], y_exp)
class CheckHasConstant:
@@ -734,7 +747,7 @@ class CheckHasConstant:
assert_equal(mod.data.const_idx, result[1])
# extra check after fit, some models raise on singular
- fit_kwds = getattr(self, 'fit_kwds', {})
+ fit_kwds = getattr(self, "fit_kwds", {})
try:
res = mod.fit(**fit_kwds)
except np.linalg.LinAlgError:
@@ -751,8 +764,7 @@ class CheckHasConstant:
cls.y_bin = (cls.y_c > 0).astype(int)
x1 = np.column_stack((np.ones(20), np.zeros(20)))
result1 = (1, 0)
- x2 = np.column_stack((np.arange(20) < 10.5,
- np.arange(20) > 10.5)).astype(float)
+ x2 = np.column_stack((np.arange(20) < 10.5, np.arange(20) > 10.5)).astype(float)
result2 = (1, None)
x3 = np.column_stack((np.arange(20), np.zeros(20)))
result3 = (0, None)
@@ -765,18 +777,27 @@ class CheckHasConstant:
x5c = np.column_stack((np.arange(20), np.ones((20, 3)) * [0.5, 1, 1]))
result5c = (1, 2)
# implicit and zero column
- x6 = np.column_stack((np.arange(20) < 10.5,
- np.arange(20) > 10.5,
- np.zeros(20))).astype(float)
+ x6 = np.column_stack(
+ (np.arange(20) < 10.5, np.arange(20) > 10.5, np.zeros(20))
+ ).astype(float)
result6 = (1, None)
- x7 = np.column_stack((np.arange(20) < 10.5,
- np.arange(20) > 10.5,
- np.zeros((20, 2)))).astype(float)
+ x7 = np.column_stack(
+ (np.arange(20) < 10.5, np.arange(20) > 10.5, np.zeros((20, 2)))
+ ).astype(float)
result7 = (1, None)
cls.exogs = (x1, x2, x3, x4, x5, x5b, x5c, x6, x7)
- cls.results = (result1, result2, result3, result4, result5, result5b,
- result5c, result6, result7)
+ cls.results = (
+ result1,
+ result2,
+ result3,
+ result4,
+ result5,
+ result5b,
+ result5c,
+ result6,
+ result7,
+ )
cls._initialize()
@@ -806,7 +827,7 @@ class TestHasConstantLogit(CheckHasConstant):
def _initialize(cls):
cls.mod = Logit
cls.y = cls.y_bin
- cls.fit_kwds = {'disp': False}
+ cls.fit_kwds = {"disp": False}
def test_dtype_object():
@@ -814,12 +835,24 @@ def test_dtype_object():
X = np.random.random((40, 2))
df = pd.DataFrame(X)
- df[2] = np.random.randint(2, size=40).astype('object')
- df['constant'] = 1
+ df[2] = np.random.randint(2, size=40).astype("object")
+ df["constant"] = 1
y = pd.Series(np.random.randint(2, size=40))
+ out = sm_data.handle_data(y, df)
+ assert isinstance(out, sm_data.PandasData)
+ assert_equal(out.endog, np.array(y))
+ assert_equal(out.exog, np.array(df))
+
- np.testing.assert_raises(ValueError, sm_data.handle_data, y, df)
+def test_dtype_actual_object():
+ X = np.random.random((40, 2))
+ df = pd.DataFrame(X)
+ df[2] = np.random.randint(2, size=40).astype("object")
+ df["constant"] = "A"
+ y = pd.Series(np.random.randint(2, size=40))
+ with pytest.raises(ValueError):
+ sm_data.handle_data(y, df)
def test_formula_missing_extra_arrays():
@@ -848,59 +881,62 @@ def test_formula_missing_extra_arrays():
weights_wrong_size = np.random.randn(12)
- data = {'y': y,
- 'X': X,
- 'y_missing': y_missing,
- 'X_missing': X_missing,
- 'weights': weights,
- 'weights_missing': weights_missing}
+ data = {
+ "y": y,
+ "X": X,
+ "y_missing": y_missing,
+ "X_missing": X_missing,
+ "weights": weights,
+ "weights_missing": weights_missing,
+ }
data = pd.DataFrame.from_dict(data)
- data['constant'] = 1
+ data["constant"] = 1
- formula = 'y_missing ~ X_missing'
+ formula = "y_missing ~ X_missing"
- ((endog, exog),
- missing_idx, design_info) = handle_formula_data(data, None, formula,
- depth=2,
- missing='drop')
+ ((endog, exog), missing_idx, design_info) = handle_formula_data(
+ data, None, formula, depth=2, missing="drop"
+ )
- kwargs = {'missing_idx': missing_idx, 'missing': 'drop',
- 'weights': data['weights_missing']}
+ kwargs = {
+ "missing_idx": missing_idx,
+ "missing": "drop",
+ "weights": data["weights_missing"],
+ }
model_data = sm_data.handle_data(endog, exog, **kwargs)
data_nona = data.dropna()
- assert_equal(data_nona['y'].values, model_data.endog)
- assert_equal(data_nona[['constant', 'X']].values, model_data.exog)
- assert_equal(data_nona['weights'].values, model_data.weights)
+ assert_equal(data_nona["y"].values, model_data.endog)
+ assert_equal(data_nona[["constant", "X"]].values, model_data.exog)
+ assert_equal(data_nona["weights"].values, model_data.weights)
- tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
+ tmp = handle_formula_data(data, None, formula, depth=2, missing="drop")
(endog, exog), missing_idx, design_info = tmp
weights_2d = np.random.randn(10, 10)
- weights_2d[[8, 7], [7, 8]] = np.nan # symmetric missing values
- kwargs.update({'weights': weights_2d,
- 'missing_idx': missing_idx})
+ weights_2d[[8, 7], [7, 8]] = np.nan # symmetric missing values
+ kwargs.update({"weights": weights_2d, "missing_idx": missing_idx})
model_data2 = sm_data.handle_data(endog, exog, **kwargs)
good_idx = [0, 4, 6, 9]
- assert_equal(data.loc[good_idx, 'y'], model_data2.endog)
- assert_equal(data.loc[good_idx, ['constant', 'X']], model_data2.exog)
+ assert_equal(data.loc[good_idx, "y"], model_data2.endog)
+ assert_equal(data.loc[good_idx, ["constant", "X"]], model_data2.exog)
assert_equal(weights_2d[good_idx][:, good_idx], model_data2.weights)
- tmp = handle_formula_data(data, None, formula, depth=2, missing='drop')
+ tmp = handle_formula_data(data, None, formula, depth=2, missing="drop")
(endog, exog), missing_idx, design_info = tmp
- kwargs.update({'weights': weights_wrong_size,
- 'missing_idx': missing_idx})
+ kwargs.update({"weights": weights_wrong_size, "missing_idx": missing_idx})
assert_raises(ValueError, sm_data.handle_data, endog, exog, **kwargs)
def test_raise_nonfinite_exog():
# we raise now in the has constant check before hitting the linear algebra
from statsmodels.tools.sm_exceptions import MissingDataError
- x = np.arange(10)[:, None]**([0., 1.])
+
+ x = np.arange(10)[:, None] ** ([0.0, 1.0])
# random numbers for y
- y = np.array([-0.6, -0.1, 0., -0.7, -0.5, 0.5, 0.1, -0.8, -2., 1.1])
+ y = np.array([-0.6, -0.1, 0.0, -0.7, -0.5, 0.5, 0.1, -0.8, -2.0, 1.1])
x[1, 1] = np.inf
assert_raises(MissingDataError, OLS, y, x)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
patsy==1.0.1
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
-e git+https://github.com/statsmodels/statsmodels.git@f29be609729882446846a930bec1afbdc78a0d2b#egg=statsmodels
tomli==2.2.1
tzdata==2025.2
| name: statsmodels
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- patsy==1.0.1
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- statsmodels==0.15.0.dev511+gf29be6097
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/statsmodels
| [
"statsmodels/base/tests/test_data.py::test_dtype_object"
] | [] | [
"statsmodels/base/tests/test_data.py::TestArrays::test_orig",
"statsmodels/base/tests/test_data.py::TestArrays::test_endogexog",
"statsmodels/base/tests/test_data.py::TestArrays::test_attach",
"statsmodels/base/tests/test_data.py::TestArrays::test_names",
"statsmodels/base/tests/test_data.py::TestArrays::test_labels",
"statsmodels/base/tests/test_data.py::TestArrays2dEndog::test_orig",
"statsmodels/base/tests/test_data.py::TestArrays2dEndog::test_attach",
"statsmodels/base/tests/test_data.py::TestArrays2dEndog::test_names",
"statsmodels/base/tests/test_data.py::TestArrays2dEndog::test_labels",
"statsmodels/base/tests/test_data.py::TestArrays2dEndog::test_endogexog",
"statsmodels/base/tests/test_data.py::TestArrays1dExog::test_endogexog",
"statsmodels/base/tests/test_data.py::TestArrays1dExog::test_attach",
"statsmodels/base/tests/test_data.py::TestArrays1dExog::test_names",
"statsmodels/base/tests/test_data.py::TestArrays1dExog::test_labels",
"statsmodels/base/tests/test_data.py::TestArrays1dExog::test_orig",
"statsmodels/base/tests/test_data.py::TestDataFrames::test_names",
"statsmodels/base/tests/test_data.py::TestDataFrames::test_labels",
"statsmodels/base/tests/test_data.py::TestDataFrames::test_orig",
"statsmodels/base/tests/test_data.py::TestDataFrames::test_endogexog",
"statsmodels/base/tests/test_data.py::TestDataFrames::test_attach",
"statsmodels/base/tests/test_data.py::TestDataFramesWithMultiIndex::test_names",
"statsmodels/base/tests/test_data.py::TestDataFramesWithMultiIndex::test_labels",
"statsmodels/base/tests/test_data.py::TestDataFramesWithMultiIndex::test_orig",
"statsmodels/base/tests/test_data.py::TestDataFramesWithMultiIndex::test_endogexog",
"statsmodels/base/tests/test_data.py::TestDataFramesWithMultiIndex::test_attach",
"statsmodels/base/tests/test_data.py::TestLists::test_orig",
"statsmodels/base/tests/test_data.py::TestLists::test_endogexog",
"statsmodels/base/tests/test_data.py::TestLists::test_attach",
"statsmodels/base/tests/test_data.py::TestLists::test_names",
"statsmodels/base/tests/test_data.py::TestLists::test_labels",
"statsmodels/base/tests/test_data.py::TestListDataFrame::test_names",
"statsmodels/base/tests/test_data.py::TestListDataFrame::test_labels",
"statsmodels/base/tests/test_data.py::TestListDataFrame::test_attach",
"statsmodels/base/tests/test_data.py::TestListDataFrame::test_endogexog",
"statsmodels/base/tests/test_data.py::TestListDataFrame::test_orig",
"statsmodels/base/tests/test_data.py::TestDataFrameList::test_names",
"statsmodels/base/tests/test_data.py::TestDataFrameList::test_labels",
"statsmodels/base/tests/test_data.py::TestDataFrameList::test_attach",
"statsmodels/base/tests/test_data.py::TestDataFrameList::test_endogexog",
"statsmodels/base/tests/test_data.py::TestDataFrameList::test_orig",
"statsmodels/base/tests/test_data.py::TestArrayDataFrame::test_names",
"statsmodels/base/tests/test_data.py::TestArrayDataFrame::test_labels",
"statsmodels/base/tests/test_data.py::TestArrayDataFrame::test_attach",
"statsmodels/base/tests/test_data.py::TestArrayDataFrame::test_endogexog",
"statsmodels/base/tests/test_data.py::TestArrayDataFrame::test_orig",
"statsmodels/base/tests/test_data.py::TestDataFrameArray::test_names",
"statsmodels/base/tests/test_data.py::TestDataFrameArray::test_labels",
"statsmodels/base/tests/test_data.py::TestDataFrameArray::test_attach",
"statsmodels/base/tests/test_data.py::TestDataFrameArray::test_endogexog",
"statsmodels/base/tests/test_data.py::TestDataFrameArray::test_orig",
"statsmodels/base/tests/test_data.py::TestSeriesDataFrame::test_names",
"statsmodels/base/tests/test_data.py::TestSeriesDataFrame::test_labels",
"statsmodels/base/tests/test_data.py::TestSeriesDataFrame::test_endogexog",
"statsmodels/base/tests/test_data.py::TestSeriesDataFrame::test_attach",
"statsmodels/base/tests/test_data.py::TestSeriesDataFrame::test_orig",
"statsmodels/base/tests/test_data.py::TestSeriesSeries::test_names",
"statsmodels/base/tests/test_data.py::TestSeriesSeries::test_labels",
"statsmodels/base/tests/test_data.py::TestSeriesSeries::test_attach",
"statsmodels/base/tests/test_data.py::TestSeriesSeries::test_orig",
"statsmodels/base/tests/test_data.py::TestSeriesSeries::test_endogexog",
"statsmodels/base/tests/test_data.py::test_alignment",
"statsmodels/base/tests/test_data.py::TestMultipleEqsArrays::test_orig",
"statsmodels/base/tests/test_data.py::TestMultipleEqsArrays::test_endogexog",
"statsmodels/base/tests/test_data.py::TestMultipleEqsArrays::test_names",
"statsmodels/base/tests/test_data.py::TestMultipleEqsArrays::test_labels",
"statsmodels/base/tests/test_data.py::TestMultipleEqsArrays::test_attach",
"statsmodels/base/tests/test_data.py::TestMultipleEqsDataFrames::test_names",
"statsmodels/base/tests/test_data.py::TestMultipleEqsDataFrames::test_labels",
"statsmodels/base/tests/test_data.py::TestMultipleEqsDataFrames::test_orig",
"statsmodels/base/tests/test_data.py::TestMultipleEqsDataFrames::test_endogexog",
"statsmodels/base/tests/test_data.py::TestMultipleEqsDataFrames::test_attach",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_raise_no_missing",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_raise",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_drop",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_none",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_endog_only_raise",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_endog_only_drop",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_mv_endog",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_extra_kwargs_2d",
"statsmodels/base/tests/test_data.py::TestMissingArray::test_extra_kwargs_1d",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_raise_no_missing",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_raise",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_drop",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_none",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_endog_only_raise",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_endog_only_drop",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_mv_endog",
"statsmodels/base/tests/test_data.py::TestMissingPandas::test_labels",
"statsmodels/base/tests/test_data.py::TestConstant::test_array_constant",
"statsmodels/base/tests/test_data.py::TestConstant::test_pandas_constant",
"statsmodels/base/tests/test_data.py::TestConstant::test_pandas_noconstant",
"statsmodels/base/tests/test_data.py::TestConstant::test_array_noconstant",
"statsmodels/base/tests/test_data.py::TestHandleMissing::test_pandas",
"statsmodels/base/tests/test_data.py::TestHandleMissing::test_arrays",
"statsmodels/base/tests/test_data.py::TestHandleMissing::test_pandas_array",
"statsmodels/base/tests/test_data.py::TestHandleMissing::test_array_pandas",
"statsmodels/base/tests/test_data.py::TestHandleMissing::test_noop",
"statsmodels/base/tests/test_data.py::TestHasConstantOLS::test_hasconst",
"statsmodels/base/tests/test_data.py::TestHasConstantGLM::test_hasconst",
"statsmodels/base/tests/test_data.py::TestHasConstantLogit::test_hasconst",
"statsmodels/base/tests/test_data.py::test_dtype_actual_object",
"statsmodels/base/tests/test_data.py::test_formula_missing_extra_arrays",
"statsmodels/base/tests/test_data.py::test_raise_nonfinite_exog"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,040 | 4,576 | [
"statsmodels/base/data.py"
] |
|
tobymao__sqlglot-4287 | c3c199714df04edfe3698594680bac06575ca285 | 2024-10-24 14:43:35 | 559e7bc5bbc77e94dea6de0470659b3c3fa6851f | VaggelisD: > You checked and other dialects are ok with this?
Yep, no other (main) dialect had an issue parsing the commonly roundtripped version.
georgesittas: Sounds good, thanks 👍 | diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index c75a67ca..85756d94 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -1212,3 +1212,12 @@ class ClickHouse(Dialect):
def projectiondef_sql(self, expression: exp.ProjectionDef) -> str:
return f"PROJECTION {self.sql(expression.this)} {self.wrap(expression.expression)}"
+
+ def is_sql(self, expression: exp.Is) -> str:
+ is_sql = super().is_sql(expression)
+
+ if isinstance(expression.parent, exp.Not) and isinstance(expression.this, exp.Subquery):
+ # WHERE (SELECT ...) IS NOT NULL -> NOT ((SELECT ...) IS NULL)
+ is_sql = self.wrap(is_sql)
+
+ return is_sql
| IS NOT NULL works incorrectly for subquery
SQLGlot parses `IS NOT NULL` to `NOT ... IS NULL` without parentheses. For example SQLGlot should parse like `NOT (... IS NULL)`
```python
from sqlglot import Dialects, parse_one
sql = """
SELECT name
FROM data
WHERE (SELECT DISTINCT name FROM data) IS NOT NULL
"""
print(parse_one(sql, read=Dialects.CLICKHOUSE).sql(dialect=Dialects.CLICKHOUSE))
```
❌ Wrong output with the version SQLGlot v25.27.0
```sql
SELECT name FROM data WHERE NOT (SELECT DISTINCT name FROM data) IS NULL
```
✅ Expected correct output
```sql
SELECT name FROM data WHERE NOT ((SELECT DISTINCT name FROM data) IS NULL)
``` | tobymao/sqlglot | diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 9a1a7e36..33ff02c5 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -526,6 +526,10 @@ class TestClickhouse(Validator):
"SELECT * FROM ABC WHERE hasAny(COLUMNS('.*field') APPLY(toUInt64) APPLY(to), (SELECT groupUniqArray(toUInt64(field))))"
)
self.validate_identity("SELECT col apply", "SELECT col AS apply")
+ self.validate_identity(
+ "SELECT name FROM data WHERE (SELECT DISTINCT name FROM data) IS NOT NULL",
+ "SELECT name FROM data WHERE NOT ((SELECT DISTINCT name FROM data) IS NULL)",
+ )
def test_clickhouse_values(self):
values = exp.select("*").from_(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 25.27 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.4.3
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@c3c199714df04edfe3698594680bac06575ca285#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.4.3
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] | [] | [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_agg_functions",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_array_join",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse_values",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_convert",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_datetime_funcs",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_drop_on_cluster",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_geom_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_grant",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_timestr_to_time",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_traverse_scope"
] | [] | MIT License | 20,046 | 218 | [
"sqlglot/dialects/clickhouse.py"
] |
reichlab__cladetime-44 | 584738567164d525456cc57903495a74dd6c71b2 | 2024-10-24 17:50:54 | 584738567164d525456cc57903495a74dd6c71b2 | diff --git a/src/cladetime/util/sequence.py b/src/cladetime/util/sequence.py
index 1057073..a50d19d 100644
--- a/src/cladetime/util/sequence.py
+++ b/src/cladetime/util/sequence.py
@@ -147,7 +147,11 @@ def _get_ncov_metadata(
)
return {}
- return response.json()
+ metadata = response.json()
+ if metadata.get("nextclade_dataset_name", "").lower() == "sars-cov-2":
+ metadata["nextclade_dataset_name_full"] = "nextstrain/sars-cov-2/wuhan-hu-1/orfs"
+
+ return metadata
def filter_covid_genome_metadata(metadata: pl.LazyFrame, cols: list = []) -> pl.LazyFrame:
@@ -175,11 +179,15 @@ def filter_covid_genome_metadata(metadata: pl.LazyFrame, cols: list = []) -> pl.
.filter(
pl.col("country") == "USA",
pl.col("division").is_in(states),
- pl.col("date").is_not_null(),
pl.col("host") == "Homo sapiens",
)
.rename({"clade_nextstrain": "clade", "division": "location"})
.cast({"date": pl.Date}, strict=False)
+ # date filtering at the end ensures we filter out null
+ # values created by the above .cast operation
+ .filter(
+ pl.col("date").is_not_null(),
+ )
)
return filtered_metadata
| Missing date values
Clade counts have rows with a missing `date`:
```
> read_parquet("https://covid-clade-counts.s3.amazonaws.com/2024-10-14_covid_clade_counts.parquet") |>
+ filter(is.na(date))
# A tibble: 197 × 4
location date clade count
<chr> <date> <chr> <int>
1 South Dakota NA 23I 3
2 Virginia NA 21H 12
3 Mississippi NA 20I 8
4 Virginia NA 23H 12
5 Louisiana NA 22C 1
6 Virginia NA 23E 7
7 Maryland NA 22E 1
8 South Dakota NA 20A 504
9 Nebraska NA 21C 2
10 Maryland NA 21F 1
# ℹ 187 more rows
# ℹ Use `print(n = ...)` to see more rows
```
The above file was generated using [this script](https://github.com/reichlab/get-covid-clade-counts/blob/main/get_covid_clade_counts.py).
Do we have an understanding of how these missing dates arise? Is this a bug we need to fix, or something about the source data? | reichlab/cladetime | diff --git a/tests/unit/util/test_sequence.py b/tests/unit/util/test_sequence.py
index b5f21b7..2532c7d 100644
--- a/tests/unit/util/test_sequence.py
+++ b/tests/unit/util/test_sequence.py
@@ -94,15 +94,23 @@ def test_download_covid_genome_metadata_no_history(s3_setup, tmp_path, mock_sess
def test_filter_covid_genome_metadata():
test_genome_metadata = {
- "date": ["2022-01-01", "2022-01-02", "2022-01-03", "2023-12-25", None, "2023-12-27"],
- "host": ["Homo sapiens", "Homo sapiens", "Homo sapiens", "Narwhals", "Homo sapiens", "Homo sapiens"],
- "country": ["USA", "Argentina", "USA", "USA", "USA", "USA"],
- "division": ["Alaska", "Maine", "Guam", "Puerto Rico", "Utah", "Pennsylvania"],
- "clade_nextstrain": ["AAA", "BBB", "CCC", "DDD", "EEE", "FFF"],
- "location": ["Vulcan", "Reisa", "Bajor", "Deep Space 9", "Earth", "Cardassia"],
- "genbank_accession": ["A1", "A2", "B1", "B2", "C1", "C2"],
- "genbank_accession_rev": ["A1.1", "A2.4", "B1.1", "B2.5", "C1.1", "C2.1"],
- "unwanted_column": [1, 2, 3, 4, 5, 6],
+ "date": ["2022-01-01", "2022-01-02", "2022-01-03", "2023-12-25", None, "2023-12-27", "2023-05"],
+ "host": [
+ "Homo sapiens",
+ "Homo sapiens",
+ "Homo sapiens",
+ "Narwhals",
+ "Homo sapiens",
+ "Homo sapiens",
+ "Homo sapiens",
+ ],
+ "country": ["USA", "Argentina", "USA", "USA", "USA", "USA", "USA"],
+ "division": ["Alaska", "Maine", "Guam", "Puerto Rico", "Utah", "Pennsylvania", "Pennsylvania"],
+ "clade_nextstrain": ["AAA", "BBB", "CCC", "DDD", "EEE", "FFF", "FFF"],
+ "location": ["Vulcan", "Reisa", "Bajor", "Deep Space 9", "Earth", "Cardassia", "Cardassia"],
+ "genbank_accession": ["A1", "A2", "B1", "B2", "C1", "C2", "C2"],
+ "genbank_accession_rev": ["A1.1", "A2.4", "B1.1", "B2.5", "C1.1", "C2.1", "C2.1"],
+ "unwanted_column": [1, 2, 3, 4, 5, 6, 7],
}
lf_metadata = pl.LazyFrame(test_genome_metadata)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements/requirements-dev.txt && pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.12",
"reqs_path": [
"requirements/requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | awscli==1.32.116
boto3==1.34.116
boto3-stubs==1.35.28
botocore==1.34.116
botocore-stubs==1.35.28
certifi==2024.2.2
cffi==1.17.1
charset-normalizer==3.3.2
-e git+https://github.com/reichlab/cladetime.git@584738567164d525456cc57903495a74dd6c71b2#egg=cladetime
click==8.1.7
cloudpathlib==0.18.1
colorama==0.4.6
coverage==7.5.3
cryptography==43.0.1
docutils==0.16
freezegun==1.5.1
idna==3.7
iniconfig==2.0.0
jellyfish==1.1.0
Jinja2==3.1.4
jmespath==1.0.1
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
moto==5.0.15
mypy==1.10.1
mypy-boto3-s3==1.35.22
mypy-extensions==1.0.0
numpy==1.26.4
packaging==24.0
pandas==2.2.2
pluggy==1.5.0
polars==1.6.0
pyarrow==16.1.0
pyasn1==0.6.0
pycparser==2.22
Pygments==2.18.0
pytest==8.2.1
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
pytz==2024.1
PyYAML==6.0.1
requests==2.32.3
responses==0.25.3
rich==13.7.1
rich-click==1.8.2
rsa==4.7.2
ruff==0.5.0
s3transfer==0.10.1
setuptools==75.8.0
six==1.16.0
structlog==24.2.0
types-awscrt==0.21.5
types-python-dateutil==2.9.0.20240906
types-requests==2.32.0.20240914
types-s3transfer==0.10.2
typing_extensions==4.12.0
tzdata==2024.1
urllib3==2.2.1
us==3.2.0
Werkzeug==3.0.4
wheel==0.45.1
xmltodict==0.13.0
| name: cladetime
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- expat=2.6.4=h6a678d5_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py312h06a4308_0
- python=3.12.9=h5148396_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py312h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py312h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- awscli==1.32.116
- boto3==1.34.116
- boto3-stubs==1.35.28
- botocore==1.34.116
- botocore-stubs==1.35.28
- certifi==2024.2.2
- cffi==1.17.1
- charset-normalizer==3.3.2
- cladetime==0.2.0
- click==8.1.7
- cloudpathlib==0.18.1
- colorama==0.4.6
- coverage==7.5.3
- cryptography==43.0.1
- docutils==0.16
- freezegun==1.5.1
- idna==3.7
- iniconfig==2.0.0
- jellyfish==1.1.0
- jinja2==3.1.4
- jmespath==1.0.1
- markdown-it-py==3.0.0
- markupsafe==2.1.5
- mdurl==0.1.2
- moto==5.0.15
- mypy==1.10.1
- mypy-boto3-s3==1.35.22
- mypy-extensions==1.0.0
- numpy==1.26.4
- packaging==24.0
- pandas==2.2.2
- pluggy==1.5.0
- polars==1.6.0
- pyarrow==16.1.0
- pyasn1==0.6.0
- pycparser==2.22
- pygments==2.18.0
- pytest==8.2.1
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pytz==2024.1
- pyyaml==6.0.1
- requests==2.32.3
- responses==0.25.3
- rich==13.7.1
- rich-click==1.8.2
- rsa==4.7.2
- ruff==0.5.0
- s3transfer==0.10.1
- six==1.16.0
- structlog==24.2.0
- types-awscrt==0.21.5
- types-python-dateutil==2.9.0.20240906
- types-requests==2.32.0.20240914
- types-s3transfer==0.10.2
- typing-extensions==4.12.0
- tzdata==2024.1
- urllib3==2.2.1
- us==3.2.0
- werkzeug==3.0.4
- xmltodict==0.13.0
prefix: /opt/conda/envs/cladetime
| [
"tests/unit/util/test_sequence.py::test_filter_covid_genome_metadata"
] | [] | [
"tests/unit/util/test_sequence.py::test_get_covid_genome_metadata[metadata.tsv.zst]",
"tests/unit/util/test_sequence.py::test_get_covid_genome_metadata[metadata.tsv.xz]",
"tests/unit/util/test_sequence.py::test_get_covid_genome_metadata_url[metadata.tsv.zst]",
"tests/unit/util/test_sequence.py::test_get_covid_genome_metadata_url[metadata.tsv.xz]",
"tests/unit/util/test_sequence.py::test_download_covid_genome_metadata[None-2025-03-31-metadata.tsv.zst]",
"tests/unit/util/test_sequence.py::test_download_covid_genome_metadata[2023-03-20-2023-03-20-metadata.tsv.zst]",
"tests/unit/util/test_sequence.py::test_download_covid_genome_metadata_no_history",
"tests/unit/util/test_sequence.py::test_parse_sequence_assignments",
"tests/unit/util/test_sequence.py::test_parse_sequence_duplicates"
] | [] | MIT License | 20,048 | 368 | [
"src/cladetime/util/sequence.py"
] |
|
narwhals-dev__narwhals-1246 | f6b47c9a84de8533ccb66e66b93d2c23dc393e41 | 2024-10-24 18:06:39 | 72a867832bc07a2aa5fc8a769786f88e3a7e30dc | MarcoGorelli: 😄 thanks, and great gif! | diff --git a/narwhals/utils.py b/narwhals/utils.py
index 37cce17d..34b45447 100644
--- a/narwhals/utils.py
+++ b/narwhals/utils.py
@@ -31,6 +31,7 @@ from narwhals.translate import to_native
if TYPE_CHECKING:
from types import ModuleType
+ import pandas as pd
from typing_extensions import Self
from typing_extensions import TypeGuard
@@ -335,10 +336,16 @@ def maybe_reset_index(obj: T) -> T:
obj_any = cast(Any, obj)
native_obj = to_native(obj_any)
if is_pandas_like_dataframe(native_obj):
+ native_namespace = obj_any.__native_namespace__()
+ if _has_default_index(native_obj, native_namespace):
+ return obj_any # type: ignore[no-any-return]
return obj_any._from_compliant_dataframe( # type: ignore[no-any-return]
obj_any._compliant_frame._from_native_frame(native_obj.reset_index(drop=True))
)
if is_pandas_like_series(native_obj):
+ native_namespace = obj_any.__native_namespace__()
+ if _has_default_index(native_obj, native_namespace):
+ return obj_any # type: ignore[no-any-return]
return obj_any._from_compliant_series( # type: ignore[no-any-return]
obj_any._compliant_series._from_native_series(
native_obj.reset_index(drop=True)
@@ -347,6 +354,18 @@ def maybe_reset_index(obj: T) -> T:
return obj_any # type: ignore[no-any-return]
+def _has_default_index(
+ native_frame_or_series: pd.Series | pd.DataFrame, native_namespace: Any
+) -> bool:
+ index = native_frame_or_series.index
+ return (
+ isinstance(index, native_namespace.RangeIndex)
+ and index.start == 0
+ and index.stop == len(index)
+ and index.step == 1
+ )
+
+
def maybe_convert_dtypes(obj: T, *args: bool, **kwargs: bool | str) -> T:
"""
Convert columns or series to the best possible dtypes using dtypes supporting ``pd.NA``, if df is pandas-like.
| perf: make `maybe_reset_index` a no-op if object has default index
In pandas < 3.0 without copy-on-write enabled, `reset_index` always creates a copy. This makes sense in pandas, as the pandas api has quite a few in-place methods
In the Narwhals API, we have zero in-place methods, so we can optimise `maybe_reset_index` by doing an early return of the object if:
- the input object `obj` is a pandas-like dataframe or pandas_like series
- the index `idx` is an instance of `native_namespace.RangeIndex`
- `idx.start == 0`
- `idx.stop == len(obj) - 1`
- `idx.step == 1`
This should help with performance in the plotly PR | narwhals-dev/narwhals | diff --git a/tests/utils_test.py b/tests/utils_test.py
index aede36ce..e06cc9ca 100644
--- a/tests/utils_test.py
+++ b/tests/utils_test.py
@@ -94,12 +94,22 @@ def test_maybe_reset_index_pandas() -> None:
result = nw.maybe_reset_index(pandas_df)
expected = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 2])
assert_frame_equal(nw.to_native(result), expected)
+ pandas_df = nw.from_native(pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}))
+ result = nw.maybe_reset_index(pandas_df)
+ expected = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+ assert_frame_equal(nw.to_native(result), expected)
+ assert result.to_native() is pandas_df.to_native()
pandas_series = nw.from_native(
pd.Series([1, 2, 3], index=[7, 8, 9]), series_only=True
)
result_s = nw.maybe_reset_index(pandas_series)
expected_s = pd.Series([1, 2, 3], index=[0, 1, 2])
assert_series_equal(nw.to_native(result_s), expected_s)
+ pandas_series = nw.from_native(pd.Series([1, 2, 3]), series_only=True)
+ result_s = nw.maybe_reset_index(pandas_series)
+ expected_s = pd.Series([1, 2, 3])
+ assert_series_equal(nw.to_native(result_s), expected_s)
+ assert result_s.to_native() is pandas_series.to_native()
def test_maybe_reset_index_polars() -> None:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-randomly pytest-env"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@f6b47c9a84de8533ccb66e66b93d2c23dc393e41#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
pyarrow==19.0.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.10.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- pyarrow==19.0.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/utils_test.py::test_maybe_reset_index_pandas"
] | [] | [
"tests/utils_test.py::test_maybe_get_index_pandas",
"tests/utils_test.py::test_maybe_convert_dtypes_pandas",
"tests/utils_test.py::test_maybe_reset_index_polars",
"tests/utils_test.py::test_with_columns_sort",
"tests/utils_test.py::test_maybe_align_index_polars",
"tests/utils_test.py::test_maybe_align_index_pandas",
"tests/utils_test.py::test_non_unique_index",
"tests/utils_test.py::test_maybe_set_index_polars",
"tests/utils_test.py::test_get_trivial_version_with_uninstalled_module",
"tests/utils_test.py::test_maybe_convert_dtypes_polars",
"tests/utils_test.py::test_maybe_get_index_polars",
"tests/utils_test.py::test_maybe_set_index_pandas"
] | [] | MIT License | 20,049 | 523 | [
"narwhals/utils.py"
] |
radiocosmology__alpenhorn-196 | c4a1a50579188c99d35eec765d8be31a70f60154 | 2024-10-24 21:00:16 | c4a1a50579188c99d35eec765d8be31a70f60154 | diff --git a/alpenhorn/io/lfs.py b/alpenhorn/io/lfs.py
index 06e2777..9ec2d6a 100644
--- a/alpenhorn/io/lfs.py
+++ b/alpenhorn/io/lfs.py
@@ -36,6 +36,7 @@ from alpenhorn import util
if TYPE_CHECKING:
import os
+del TYPE_CHECKING
log = logging.getLogger(__name__)
@@ -113,7 +114,7 @@ class LFS:
if self._lfs is None:
raise RuntimeError("lfs command not found.")
- def run_lfs(self, *args: str) -> str | None:
+ def run_lfs(self, *args: str, timeout: float | None = None) -> str | False | None:
"""Run the lfs command with the `args` provided.
Parameters
@@ -121,26 +122,35 @@ class LFS:
*args : strings
The list of command-line arguments to pass to the
lfs command.
+ timeout : float, optional
+ If not None, stop waiting for the command after `timeout` seconds
Retunrs
-------
- output : str or None
+ output : str or False or None
If the command succeeded, returns standard output of
- the command. If the command failed, returns None and
- logs the failure.
+ the command. If the command failed or timed out,
+ returns False (failed) or None (timed out) and logs
+ the failure.
"""
# Stringify args
args = [str(arg) for arg in args]
- ret, stdout, stderr = util.run_command([self._lfs] + args)
+ ret, stdout, stderr = util.run_command([self._lfs] + args, timeout=timeout)
- if ret != 0:
- log.warning(f"LFS command failed (ret={ret}): " + " ".join(args))
+ # Failure or timeout
+ if ret is None or ret != 0:
+ if ret is None:
+ result = "timed out"
+ else:
+ result = f"failed (ret={ret})"
+ ret = False
+ log.warning(f"LFS command {result}: " + " ".join(args))
if stderr:
log.debug(f"LFS stderr: {stderr}")
if stdout:
log.debug(f"LFS stdout: {stdout}")
- return None
+ return ret
return stdout
@@ -259,7 +269,7 @@ class LFS:
path = str(path)
stdout = self.run_lfs("hsm_state", path)
- if stdout is None:
+ if stdout is False:
return None # Command returned error
# The output of hsm_state looks like this:
@@ -304,7 +314,7 @@ class LFS:
"""Is `path` released to external storage?"""
return self.hsm_state(path) == HSMState.RELEASED
- def hsm_restore(self, path: os.PathLike) -> bool:
+ def hsm_restore(self, path: os.PathLike) -> bool | None:
"""Trigger restore of `path` from external storage.
If `path` is already restored or is missing, this does nothing.
@@ -316,9 +326,11 @@ class LFS:
Returns
-------
- restored : bool
+ restored : bool or None
+ None if the request timed out.
+ False if the request failed.
True if a successful restore request was made, or if
- `path` was already restored. False otherwise.
+ `path` was already restored.
"""
state = self.hsm_state(path)
@@ -331,7 +343,10 @@ class LFS:
if state != HSMState.RELEASED:
return True
- return self.run_lfs("hsm_restore", path) is not None
+ result = self.run_lfs("hsm_restore", path, timeout=60)
+ if result is None or result is False:
+ return result
+ return True
def hsm_release(self, path: os.PathLike) -> bool:
"""Trigger release of `path` from disk.
@@ -363,4 +378,4 @@ class LFS:
return False
# Otherwise send the request
- return self.run_lfs("hsm_release", path) is not None
+ return self.run_lfs("hsm_release", path) is not False
diff --git a/alpenhorn/io/lustrehsm.py b/alpenhorn/io/lustrehsm.py
index e4959da..3d3a160 100644
--- a/alpenhorn/io/lustrehsm.py
+++ b/alpenhorn/io/lustrehsm.py
@@ -36,9 +36,14 @@ if TYPE_CHECKING:
from ..archive import ArchiveFileCopyRequest
from ..queue import FairMultiFIFOQueue
from ..update import UpdateableNode, UpdateableGroup
+del TYPE_CHECKING
log = logging.getLogger(__name__)
+# Retry delays (in seconds) after a successful or timed-out hsm_restore request
+RESTORE_TIMEOUT_RETRY = 1 * 3600 # 1 hour
+RESTORE_SUCCESS_RETRY = 4 * 3600 # 4 hours
+
class LustreHSMNodeRemote(BaseNodeRemote):
"""LustreHSMNodeRemote: information about a LustreHSM remote node."""
@@ -100,6 +105,10 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
# `ArchiveFile.id`s
self._restoring = set()
+ # The time.monotonic value after which we should try to restore again.
+ # Keys are elements in self._restoring.
+ self._restore_retry = dict()
+
# For informational purposes. Keys are elements in self._restoring.
self._restore_start = dict()
@@ -136,6 +145,7 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
if state == self._lfs.HSM_MISSING:
log.warning(f"Unable to restore {copy.path}: missing.")
self._restore_start.pop(copy.file.id, None)
+ self._restore_retry.pop(copy.file.id, None)
self._restoring.discard(copy.file.id)
return None
@@ -150,20 +160,53 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
f"after {pretty_deltat(deltat)}"
)
self._restore_start.pop(copy.file.id, None)
+ self._restore_retry.pop(copy.file.id, None)
self._restoring.discard(copy.file.id)
return False
# If we got here, copy is released.
- # Add this copy to the list of copies we're waiting on. Other tasks
- # can use this to see if the file they're interested in is already
- # "in use".
- if copy.file.id not in self._restoring:
- self._restoring.add(copy.file.id)
- self._restore_start[copy.file.id] = time.monotonic()
+ # Have we hit the retry time for an in-progress restore?
+ retry_restore = (
+ copy.file.id in self._restoring
+ and self._restore_retry[copy.file.id] <= time.monotonic()
+ )
+
+ if retry_restore or copy.file.id not in self._restoring:
+ # Add this copy to the list of copies we're waiting on. Other tasks
+ # can use this to see if the file they're interested in is already
+ # "in use".
+ if not retry_restore:
+ self._restoring.add(copy.file.id)
+ self._restore_start[copy.file.id] = time.monotonic()
+
+ # Try to restore it.
+ result = self._lfs.hsm_restore(copy.path)
+ log.warning(f"restore result: {result}")
+
+ if result is False:
+ # Reqeust failed. Abandon the restore attempt entirely,
+ # in case it was deleted from the node.
+ self._restore_retry.pop(copy.file.id, None)
+ self._restore_start.pop(copy.file.id, None)
+ self._restoring.discard(copy.file.id)
- # Restore it.
- self._lfs.hsm_restore(copy.path)
+ # Report failure
+ return None
+ elif result is None:
+ # Request timeout. This seems to happen when the file
+ # is already being restored, but let's try again in a
+ # little while, just to be safe.
+ self._restore_retry[copy.file.id] = (
+ time.monotonic() + RESTORE_TIMEOUT_RETRY
+ )
+ else:
+ # Request success; restore should be in-progress, but
+ # we'll still schedule a retry for some time in the future
+ # to guard against HSM forgetting/abandonning our request
+ self._restore_retry[copy.file.id] = (
+ time.monotonic() + RESTORE_SUCCESS_RETRY
+ )
# Tell the caller to wait
return True
diff --git a/alpenhorn/util.py b/alpenhorn/util.py
index b41f22d..1a28ca8 100644
--- a/alpenhorn/util.py
+++ b/alpenhorn/util.py
@@ -51,9 +51,7 @@ def run_command(
except subprocess.TimeoutExpired:
log.warning(f"Process overrun [timeout={timeout}]: " + " ".join(cmd))
proc.kill()
- stdout_val = ""
- stderr_val = ""
- retval = None
+ return (None, "", "")
return (
retval,
| Add timeout to `LustreHSMNodeIO._restore_wait`
To avoid the potential for locking-up all the workers due to restore requests being abandonned/forgotten about/ignored by HSM without alpenhorn's knowledge.
https://github.com/radiocosmology/alpenhorn/blob/4670ec285f810bef19199fd4a9cc2848d81e539e/alpenhorn/io/lustrehsm.py#L113
Probably best to have something configurable via conf file, but I want to run as-is for a while to see what timescale would be reasonable as a default here. | radiocosmology/alpenhorn | diff --git a/tests/conftest.py b/tests/conftest.py
index a057a8a..0608f86 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -39,6 +39,13 @@ def pytest_configure(config):
"to indicate the desired quota that LFS.quota_remaining "
"should return.",
)
+ config.addinivalue_line(
+ "markers",
+ "lfs_hsm_restore_result(result): "
+ "used on tests which mock alpenhorn.io.lfs.LFS "
+ "to indicate the result of the hsm_restore call. result "
+ "may be 'fail', 'timeout', or 'wait'",
+ )
config.addinivalue_line(
"markers",
"lfs_dont_mock(*method_names): "
@@ -225,7 +232,20 @@ def mock_lfs(have_lfs, request):
raise ValueError("Bad state in lfs_hsm_state marker: {state} for path {path}")
def _mocked_lfs_hsm_restore(self, path):
- nonlocal lfs_hsm_state
+ nonlocal request, lfs_hsm_state
+
+ marker = request.node.get_closest_marker("lfs_hsm_restore_result")
+ if marker:
+ if marker.args[0] == "fail":
+ # Return failure
+ return False
+ if marker.args[0] == "timeout":
+ # Return timeout
+ return None
+ if marker.args[0] == "wait":
+ # Return true (successful request)
+ # without changing state to "restored"
+ return True
# de-pathlib-ify
path = str(path)
diff --git a/tests/io/test_lfs.py b/tests/io/test_lfs.py
index d4e93e7..9a54c3f 100644
--- a/tests/io/test_lfs.py
+++ b/tests/io/test_lfs.py
@@ -40,7 +40,7 @@ def test_run_lfs_fail(have_lfs, mock_run_command):
lfs = LFS(None)
- assert lfs.run_lfs("arg1", "arg2") is None
+ assert lfs.run_lfs("arg1", "arg2") is False
assert mock_run_command() == {
"cmd": ["LFS", "arg1", "arg2"],
"kwargs": dict(),
@@ -255,7 +255,7 @@ def test_hsm_restore(mock_lfs, mock_run_command):
lfs = LFS("qgroup")
- assert not lfs.hsm_restore("/missing")
+ assert lfs.hsm_restore("/missing") is False
assert lfs.hsm_restore("/unarchived")
assert lfs.hsm_restore("/restored")
@@ -268,6 +268,17 @@ def test_hsm_restore(mock_lfs, mock_run_command):
assert "/released" in mock_run_command()["cmd"]
[email protected]_command_result(None, "", "")
[email protected]_hsm_state({"/released": "released"})
[email protected]_dont_mock("hsm_restore")
+def test_hsm_restore_timeout(mock_lfs, mock_run_command):
+ """Test hsm_restore() timeout."""
+
+ lfs = LFS("qgroup")
+
+ assert lfs.hsm_restore("/released") is None
+
+
@pytest.mark.lfs_hsm_state(
{
"/missing": "missing",
diff --git a/tests/io/test_lustrehsmnode.py b/tests/io/test_lustrehsmnode.py
index 9ded3fa..25992db 100644
--- a/tests/io/test_lustrehsmnode.py
+++ b/tests/io/test_lustrehsmnode.py
@@ -262,7 +262,6 @@ def test_check_released(xfs, queue, mock_lfs, node):
"""Test check on a non-ready, released file."""
xfs.create_file("/node/simpleacq/file1")
-
copy = ArchiveFileCopy.get(id=1)
copy.ready = False
copy.save()
@@ -536,3 +535,111 @@ def test_idle_update_not_ready(xfs, queue, mock_lfs, node):
assert not ArchiveFileCopy.get(id=4).ready
assert ArchiveFileCopy.get(id=4).last_update >= before
assert ArchiveFileCopy.get(id=4).has_file == "N"
+
+
[email protected]_hsm_state({"/node/simpleacq/file1": "released"})
[email protected]_hsm_restore_result("wait")
+def test_hsm_restore_twice(xfs, queue, mock_lfs, node):
+ """Test that only one restore request is made."""
+
+ # File is not ready, and maybe corrupt
+ xfs.create_file("/node/simpleacq/file1")
+ copy = ArchiveFileCopy.get(id=1)
+ copy.has_file = "M"
+ copy.ready = False
+ copy.save()
+
+ # Task will restore the file
+ node.io.check(copy)
+
+ # One item in queue now
+ assert queue.qsize == 1
+
+ # Run the task
+ task, key = queue.get()
+ task()
+ queue.task_done(key)
+
+ # Task has been requeued and is deferred
+ assert queue.deferred_size == 1
+ assert queue.qsize == 0
+
+ # Check the internal bookkeeping
+ assert copy.id in node.io._restoring
+ assert copy.id in node.io._restore_start
+ assert copy.id in node.io._restore_retry
+
+ # Try to add another task
+ node.io.check(copy)
+
+ # Second attempt shouldn't make a new task
+ # because the first one added copy.id to the
+ # list of in-progress restores
+ assert queue.deferred_size == 1
+ assert queue.qsize == 0
+
+
[email protected]_hsm_state({"/node/simpleacq/file1": "released"})
[email protected]_hsm_restore_result("timeout")
+def test_hsm_restore_timeout(xfs, queue, mock_lfs, node):
+ """Test handling of timeout in hsm_restore"""
+
+ # File is not ready, and maybe corrupt
+ xfs.create_file("/node/simpleacq/file1")
+ copy = ArchiveFileCopy.get(id=1)
+ copy.has_file = "M"
+ copy.ready = False
+ copy.save()
+
+ # Task will restore the file
+ node.io.check(copy)
+
+ # One item in queue now
+ assert queue.qsize == 1
+
+ # Run the task
+ task, key = queue.get()
+ task()
+ queue.task_done(key)
+
+ # Task has been requeued and is deferred
+ assert queue.deferred_size == 1
+ assert queue.qsize == 0
+
+ # Check the internal bookkeeping
+ assert copy.id in node.io._restoring
+ assert copy.id in node.io._restore_start
+ assert copy.id in node.io._restore_retry
+
+
[email protected]_hsm_state({"/node/simpleacq/file1": "released"})
[email protected]_hsm_restore_result("fail")
+def test_hsm_restore_fail(xfs, queue, mock_lfs, node):
+ """Test handling of hsm_restore failure"""
+
+ # File is not ready, and maybe corrupt
+ xfs.create_file("/node/simpleacq/file1")
+ copy = ArchiveFileCopy.get(id=1)
+ copy.has_file = "M"
+ copy.ready = False
+ copy.save()
+
+ # Task will restore the file
+ node.io.check(copy)
+
+ # One item in queue now
+ assert queue.qsize == 1
+
+ # Run the task
+ task, key = queue.get()
+ task()
+ queue.task_done(key)
+
+ # Task has been abandonned
+ assert queue.deferred_size == 0
+ assert queue.qsize == 0
+
+ # Check the internal bookkeeping
+ assert copy.id not in node.io._restoring
+ assert copy.id not in node.io._restore_start
+ assert copy.id not in node.io._restore_retry
diff --git a/tests/test_util.py b/tests/test_util.py
index 6ad06d6..ddaa9c1 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -26,7 +26,7 @@ def test_run_stdout():
def test_run_stderr():
- """Test getting stdout from run_command."""
+ """Test getting stderr from run_command."""
retval, stdout, stderr = util.run_command(
["python3", "-c", "import os; os.write(2, b'stderr')"]
)
@@ -35,6 +35,12 @@ def test_run_stderr():
assert retval == 0
+def test_run_timeout():
+ """Test run_command timing out."""
+ retval, stdout, stderr = util.run_command(["sleep", "10"], timeout=0.1)
+ assert retval is None
+
+
def test_md5sum_file(tmp_path):
"""Test util.md5sum_file"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/radiocosmology/alpenhorn.git@c4a1a50579188c99d35eec765d8be31a70f60154#egg=alpenhorn
bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chimedb @ git+https://github.com/chime-experiment/chimedb.git@d82f48eb0599393723e7ee5d756aff6c6830db32
click==8.1.8
concurrent-log-handler==0.9.25
cryptography==44.0.2
docker==7.1.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mysql-connector-python==8.0.29
packaging @ file:///croot/packaging_1734472117206/work
paramiko==3.5.1
peewee==3.17.9
pluggy @ file:///croot/pluggy_1733169602837/work
portalocker==3.1.1
protobuf==6.30.2
pycparser==2.22
pyfakefs==5.8.0
PyNaCl==1.5.0
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
requests==2.32.3
sshtunnel==0.4.0
tabulate==0.9.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
ujson==5.10.0
urllib3==2.3.0
watchdog==6.0.0
| name: alpenhorn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py310h06a4308_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- pytest=8.3.4=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py310h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alpenhorn==2.0.0a1
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chimedb==24.8.0.post2+git.d82f48eb
- click==8.1.8
- concurrent-log-handler==0.9.25
- cryptography==44.0.2
- docker==7.1.0
- idna==3.10
- mysql-connector-python==8.0.29
- paramiko==3.5.1
- peewee==3.17.9
- portalocker==3.1.1
- protobuf==6.30.2
- pycparser==2.22
- pyfakefs==5.8.0
- pynacl==1.5.0
- pyyaml==6.0.2
- requests==2.32.3
- sshtunnel==0.4.0
- tabulate==0.9.0
- ujson==5.10.0
- urllib3==2.3.0
- watchdog==6.0.0
prefix: /opt/conda/envs/alpenhorn
| [
"tests/io/test_lfs.py::test_run_lfs_fail",
"tests/io/test_lfs.py::test_hsm_restore_timeout",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_twice",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_timeout",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_fail",
"tests/test_util.py::test_run_timeout"
] | [
"tests/io/test_lustrehsmnode.py::test_filesize"
] | [
"tests/io/test_lfs.py::test_run_lfs_success",
"tests/io/test_lfs.py::test_run_lfs_stringify",
"tests/io/test_lfs.py::test_quota_auto_syntax_short",
"tests/io/test_lfs.py::test_quota_auto_with_default",
"tests/io/test_lfs.py::test_quota_auto_default_no_fixed",
"tests/io/test_lfs.py::test_quota_auto_syntax_quota",
"tests/io/test_lfs.py::test_quota_over",
"tests/io/test_lfs.py::test_quota_auto",
"tests/io/test_lfs.py::test_quota_longpath",
"tests/io/test_lfs.py::test_quota_fixed",
"tests/io/test_lfs.py::test_hsm_state_missing",
"tests/io/test_lfs.py::test_hsm_state_syntax",
"tests/io/test_lfs.py::test_hsm_state_unarchived",
"tests/io/test_lfs.py::test_hsm_state_restored",
"tests/io/test_lfs.py::test_hsm_state_released",
"tests/io/test_lfs.py::test_hsm_archived",
"tests/io/test_lfs.py::test_hsm_released",
"tests/io/test_lfs.py::test_hsm_restore",
"tests/io/test_lfs.py::test_hsm_release",
"tests/io/test_lustrehsmnode.py::test_init_no_headroom",
"tests/io/test_lustrehsmnode.py::test_init_bad_release_count",
"tests/io/test_lustrehsmnode.py::test_release_files_okay",
"tests/io/test_lustrehsmnode.py::test_release_files",
"tests/io/test_lustrehsmnode.py::test_before_update",
"tests/io/test_lustrehsmnode.py::test_open_binary",
"tests/io/test_lustrehsmnode.py::test_open_text",
"tests/io/test_lustrehsmnode.py::test_check_missing",
"tests/io/test_lustrehsmnode.py::test_check_ready_restored",
"tests/io/test_lustrehsmnode.py::test_check_released",
"tests/io/test_lustrehsmnode.py::test_check_ready_released",
"tests/io/test_lustrehsmnode.py::test_ready_path",
"tests/io/test_lustrehsmnode.py::test_ready_pull_restored",
"tests/io/test_lustrehsmnode.py::test_ready_pull_released",
"tests/io/test_lustrehsmnode.py::test_idle_update_empty",
"tests/io/test_lustrehsmnode.py::test_idle_update_ready",
"tests/io/test_lustrehsmnode.py::test_idle_update_not_ready",
"tests/test_util.py::test_run_retval0",
"tests/test_util.py::test_run_retval1",
"tests/test_util.py::test_run_stdout",
"tests/test_util.py::test_run_stderr",
"tests/test_util.py::test_md5sum_file",
"tests/test_util.py::test_gethostname_config",
"tests/test_util.py::test_gethostname_default",
"tests/test_util.py::test_pretty_bytes",
"tests/test_util.py::test_pretty_deltat"
] | [] | MIT License | 20,051 | 2,267 | [
"alpenhorn/io/lfs.py",
"alpenhorn/io/lustrehsm.py",
"alpenhorn/util.py"
] |
|
tobymao__sqlglot-4293 | 559e7bc5bbc77e94dea6de0470659b3c3fa6851f | 2024-10-25 10:28:24 | 559e7bc5bbc77e94dea6de0470659b3c3fa6851f | VaggelisD: It's actually not clear what's the behavior leading to the 2nd issue; If it was an alias, the `AS` version wouldn't lead to an error and it would be possible to select it directly. Here are some interesting examples of that:
- It's valid to include multiple identifiers before the type
```SQL
sqlite> create table test (foo bar baz TEXT);
sqlite> insert into test values (1);
sqlite> select * from test;
1
```
- The identifier resolution rules are not obvious; It seems that the column can be selected with a subset of the text
```
sqlite> select foo bar baz from test;
Parse error: near "baz": syntax error
select foo bar baz from test;
^--- error here
sqlite> select foo bar from test;
1
sqlite> select foo from test;
1
sqlite> select bar from test;
Parse error: no such column: bar
select bar from test;
^--- error here
sqlite> select baz from test;
Parse error: no such column: baz
select baz from test;
^--- error here
```
- Upon inspecting the table definition, the 2nd+ identifier(s) leak into the type (?)
```SQL
sqlite> PRAGMA table_info("test");
0|foo|bar baz TEXT|0||0
```
For these reasons, I'll revert the solution to (2) | diff --git a/sqlglot/dialects/sqlite.py b/sqlglot/dialects/sqlite.py
index d7126634..02698809 100644
--- a/sqlglot/dialects/sqlite.py
+++ b/sqlglot/dialects/sqlite.py
@@ -120,6 +120,14 @@ class SQLite(Dialect):
}
STRING_ALIASES = True
+ def _parse_unique(self) -> exp.UniqueColumnConstraint:
+ # Do not consume more tokens if UNIQUE is used as a standalone constraint, e.g:
+ # CREATE TABLE foo (bar TEXT UNIQUE REFERENCES baz ...)
+ if self._curr.text.upper() in self.CONSTRAINT_PARSERS:
+ return self.expression(exp.UniqueColumnConstraint)
+
+ return super()._parse_unique()
+
class Generator(generator.Generator):
JOIN_HINTS = False
TABLE_HINTS = False
| Parser doesn't recognize a foreign-key-clause within a column definition after unique for sqlite
When parsing a column definition with a foreign-key-clause, the parser does not expect the key word REFERENCES after UNIQUE
**Fully reproducible code snippet**
```python
from sqlglot import parse_one
s = '''
CREATE TABLE "store"
(
store_id INTEGER
primary key autoincrement,
manager_staff_id INTEGER not null
unique
references staff
on update cascade
)
'''
parse_one(s, read="sqlite")
```
```
ParseError Traceback (most recent call last)
Cell In[15], line 13
1 from sqlglot import parse_one
2 s = '''
3 CREATE TABLE "store"
4 (
(...)
11 )
12 '''
---> 13 parse_one(s, read="sqlite")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/__init__.py:139, in parse_one(sql, read, dialect, into, **opts)
137 result = dialect.parse_into(into, sql, **opts)
138 else:
--> 139 result = dialect.parse(sql, **opts)
141 for expression in result:
142 if not expression:
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/dialects/dialect.py:920, in Dialect.parse(self, sql, **opts)
919 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
--> 920 return self.parser(**opts).parse(self.tokenize(sql), sql)
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1416, in Parser.parse(self, raw_tokens, sql)
1402 def parse(
1403 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
1404 ) -> t.List[t.Optional[exp.Expression]]:
1405 """
1406 Parses a list of tokens and returns a list of syntax trees, one tree
1407 per parsed SQL statement.
(...)
1414 The list of the produced syntax trees.
1415 """
-> 1416 return self._parse(
1417 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
1418 )
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1485, in Parser._parse(self, parse_method, raw_tokens, sql)
1482 self._tokens = tokens
1483 self._advance()
-> 1485 expressions.append(parse_method(self))
1487 if self._index < len(self._tokens):
1488 self.raise_error("Invalid expression / Unexpected token")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1717, in Parser._parse_statement(self)
1714 return None
1716 if self._match_set(self.STATEMENT_PARSERS):
-> 1717 return self.STATEMENT_PARSERS[self._prev.token_type](self)
1719 if self._match_set(self.dialect.tokenizer.COMMANDS):
1720 return self._parse_command()
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:785, in Parser.<lambda>(self)
711 COLUMN_OPERATORS = {
712 TokenType.DOT: None,
713 TokenType.DCOLON: lambda self, this, to: self.expression(
(...)
744 ),
745 }
747 EXPRESSION_PARSERS = {
748 exp.Cluster: lambda self: self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
749 exp.Column: lambda self: self._parse_column(),
(...)
775 "JOIN_TYPE": lambda self: self._parse_join_parts(),
776 }
778 STATEMENT_PARSERS = {
779 TokenType.ALTER: lambda self: self._parse_alter(),
780 TokenType.BEGIN: lambda self: self._parse_transaction(),
781 TokenType.CACHE: lambda self: self._parse_cache(),
782 TokenType.COMMENT: lambda self: self._parse_comment(),
783 TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
784 TokenType.COPY: lambda self: self._parse_copy(),
--> 785 TokenType.CREATE: lambda self: self._parse_create(),
786 TokenType.DELETE: lambda self: self._parse_delete(),
787 TokenType.DESC: lambda self: self._parse_describe(),
788 TokenType.DESCRIBE: lambda self: self._parse_describe(),
789 TokenType.DROP: lambda self: self._parse_drop(),
790 TokenType.GRANT: lambda self: self._parse_grant(),
791 TokenType.INSERT: lambda self: self._parse_insert(),
792 TokenType.KILL: lambda self: self._parse_kill(),
793 TokenType.LOAD: lambda self: self._parse_load(),
794 TokenType.MERGE: lambda self: self._parse_merge(),
795 TokenType.PIVOT: lambda self: self._parse_simplified_pivot(),
796 TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()),
797 TokenType.REFRESH: lambda self: self._parse_refresh(),
798 TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
799 TokenType.SET: lambda self: self._parse_set(),
800 TokenType.TRUNCATE: lambda self: self._parse_truncate_table(),
801 TokenType.UNCACHE: lambda self: self._parse_uncache(),
802 TokenType.UPDATE: lambda self: self._parse_update(),
803 TokenType.USE: lambda self: self.expression(
804 exp.Use,
805 kind=self._parse_var_from_options(self.USABLES, raise_unmatched=False),
806 this=self._parse_table(schema=False),
807 ),
808 TokenType.SEMICOLON: lambda self: self.expression(exp.Semicolon),
809 }
811 UNARY_PARSERS = {
812 TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op
813 TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
(...)
817 TokenType.DPIPE_SLASH: lambda self: self.expression(exp.Cbrt, this=self._parse_unary()),
818 }
820 STRING_PARSERS = {
821 TokenType.HEREDOC_STRING: lambda self, token: self.expression(
822 exp.RawString, this=token.text
(...)
835 ),
836 }
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1872, in Parser._parse_create(self)
1869 self._match(TokenType.COMMA)
1870 extend_props(self._parse_properties(before=True))
-> 1872 this = self._parse_schema(this=table_parts)
1874 # exp.Properties.Location.POST_SCHEMA and POST_WITH
1875 extend_props(self._parse_properties())
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:5426, in Parser._parse_schema(self, this)
5424 return this
5425 args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
-> 5426 self._match_r_paren()
5427 return self.expression(exp.Schema, this=this, expressions=args)
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:7222, in Parser._match_r_paren(self, expression)
7220 def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
7221 if not self._match(TokenType.R_PAREN, expression=expression):
-> 7222 self.raise_error("Expecting )")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1529, in Parser.raise_error(self, message, token)
1517 error = ParseError.new(
1518 f"{message}. Line {token.line}, Col: {token.col}.\n"
1519 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
1525 end_context=end_context,
1526 )
1528 if self.error_level == ErrorLevel.IMMEDIATE:
-> 1529 raise error
1531 self.errors.append(error)
ParseError: Expecting ). Line 8, Col: 17.
manager_staff_id INTEGER not null
unique
references staff
on update cascade
)
In [16]: from sqlglot import parse_one
...: s = '''
...: CREATE TABLE "store"
...: (
...: store_id INTEGER
...: primary key autoincrement,
...: manager_staff_id INTEGER not null
...: references staff
...: on update cascade
...: )
...: '''
...: parse_one(s, read="sqlite")
...:
Out[16]:
Create(
this=Schema(
this=Table(
this=Identifier(this=store, quoted=True)),
expressions=[
ColumnDef(
this=Identifier(this=store_id, quoted=False),
kind=DataType(this=Type.INT, nested=False),
constraints=[
ColumnConstraint(
kind=PrimaryKeyColumnConstraint()),
ColumnConstraint(
kind=AutoIncrementColumnConstraint())]),
ColumnDef(
this=Identifier(this=manager_staff_id, quoted=False),
kind=DataType(this=Type.INT, nested=False),
constraints=[
ColumnConstraint(
kind=NotNullColumnConstraint()),
ColumnConstraint(
kind=Reference(
this=Table(
this=Identifier(this=staff, quoted=False)),
options=[
ON update CASCADE]))])]),
kind=TABLE)
In [17]: from sqlglot import parse_one
...: s = '''
...: CREATE TABLE "store"
...: (
...: store_id INTEGER
...: primary key autoincrement,
...: manager_staff_id INTEGER not null
...: unique
...: references staff
...: on update cascade
...: )
...: '''
...: parse_one(s, read="sqlite")
...:
---------------------------------------------------------------------------
ParseError Traceback (most recent call last)
Cell In[17], line 13
1 from sqlglot import parse_one
2 s = '''
3 CREATE TABLE "store"
4 (
(...)
11 )
12 '''
---> 13 parse_one(s, read="sqlite")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/__init__.py:139, in parse_one(sql, read, dialect, into, **opts)
137 result = dialect.parse_into(into, sql, **opts)
138 else:
--> 139 result = dialect.parse(sql, **opts)
141 for expression in result:
142 if not expression:
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/dialects/dialect.py:920, in Dialect.parse(self, sql, **opts)
919 def parse(self, sql: str, **opts) -> t.List[t.Optional[exp.Expression]]:
--> 920 return self.parser(**opts).parse(self.tokenize(sql), sql)
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1416, in Parser.parse(self, raw_tokens, sql)
1402 def parse(
1403 self, raw_tokens: t.List[Token], sql: t.Optional[str] = None
1404 ) -> t.List[t.Optional[exp.Expression]]:
1405 """
1406 Parses a list of tokens and returns a list of syntax trees, one tree
1407 per parsed SQL statement.
(...)
1414 The list of the produced syntax trees.
1415 """
-> 1416 return self._parse(
1417 parse_method=self.__class__._parse_statement, raw_tokens=raw_tokens, sql=sql
1418 )
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1485, in Parser._parse(self, parse_method, raw_tokens, sql)
1482 self._tokens = tokens
1483 self._advance()
-> 1485 expressions.append(parse_method(self))
1487 if self._index < len(self._tokens):
1488 self.raise_error("Invalid expression / Unexpected token")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1717, in Parser._parse_statement(self)
1714 return None
1716 if self._match_set(self.STATEMENT_PARSERS):
-> 1717 return self.STATEMENT_PARSERS[self._prev.token_type](self)
1719 if self._match_set(self.dialect.tokenizer.COMMANDS):
1720 return self._parse_command()
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:785, in Parser.<lambda>(self)
711 COLUMN_OPERATORS = {
712 TokenType.DOT: None,
713 TokenType.DCOLON: lambda self, this, to: self.expression(
(...)
744 ),
745 }
747 EXPRESSION_PARSERS = {
748 exp.Cluster: lambda self: self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
749 exp.Column: lambda self: self._parse_column(),
(...)
775 "JOIN_TYPE": lambda self: self._parse_join_parts(),
776 }
778 STATEMENT_PARSERS = {
779 TokenType.ALTER: lambda self: self._parse_alter(),
780 TokenType.BEGIN: lambda self: self._parse_transaction(),
781 TokenType.CACHE: lambda self: self._parse_cache(),
782 TokenType.COMMENT: lambda self: self._parse_comment(),
783 TokenType.COMMIT: lambda self: self._parse_commit_or_rollback(),
784 TokenType.COPY: lambda self: self._parse_copy(),
--> 785 TokenType.CREATE: lambda self: self._parse_create(),
786 TokenType.DELETE: lambda self: self._parse_delete(),
787 TokenType.DESC: lambda self: self._parse_describe(),
788 TokenType.DESCRIBE: lambda self: self._parse_describe(),
789 TokenType.DROP: lambda self: self._parse_drop(),
790 TokenType.GRANT: lambda self: self._parse_grant(),
791 TokenType.INSERT: lambda self: self._parse_insert(),
792 TokenType.KILL: lambda self: self._parse_kill(),
793 TokenType.LOAD: lambda self: self._parse_load(),
794 TokenType.MERGE: lambda self: self._parse_merge(),
795 TokenType.PIVOT: lambda self: self._parse_simplified_pivot(),
796 TokenType.PRAGMA: lambda self: self.expression(exp.Pragma, this=self._parse_expression()),
797 TokenType.REFRESH: lambda self: self._parse_refresh(),
798 TokenType.ROLLBACK: lambda self: self._parse_commit_or_rollback(),
799 TokenType.SET: lambda self: self._parse_set(),
800 TokenType.TRUNCATE: lambda self: self._parse_truncate_table(),
801 TokenType.UNCACHE: lambda self: self._parse_uncache(),
802 TokenType.UPDATE: lambda self: self._parse_update(),
803 TokenType.USE: lambda self: self.expression(
804 exp.Use,
805 kind=self._parse_var_from_options(self.USABLES, raise_unmatched=False),
806 this=self._parse_table(schema=False),
807 ),
808 TokenType.SEMICOLON: lambda self: self.expression(exp.Semicolon),
809 }
811 UNARY_PARSERS = {
812 TokenType.PLUS: lambda self: self._parse_unary(), # Unary + is handled as a no-op
813 TokenType.NOT: lambda self: self.expression(exp.Not, this=self._parse_equality()),
(...)
817 TokenType.DPIPE_SLASH: lambda self: self.expression(exp.Cbrt, this=self._parse_unary()),
818 }
820 STRING_PARSERS = {
821 TokenType.HEREDOC_STRING: lambda self, token: self.expression(
822 exp.RawString, this=token.text
(...)
835 ),
836 }
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1872, in Parser._parse_create(self)
1869 self._match(TokenType.COMMA)
1870 extend_props(self._parse_properties(before=True))
-> 1872 this = self._parse_schema(this=table_parts)
1874 # exp.Properties.Location.POST_SCHEMA and POST_WITH
1875 extend_props(self._parse_properties())
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:5426, in Parser._parse_schema(self, this)
5424 return this
5425 args = self._parse_csv(lambda: self._parse_constraint() or self._parse_field_def())
-> 5426 self._match_r_paren()
5427 return self.expression(exp.Schema, this=this, expressions=args)
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:7222, in Parser._match_r_paren(self, expression)
7220 def _match_r_paren(self, expression: t.Optional[exp.Expression] = None) -> None:
7221 if not self._match(TokenType.R_PAREN, expression=expression):
-> 7222 self.raise_error("Expecting )")
File ~/.local/share/conda/envs/tuning/lib/python3.12/site-packages/sqlglot/parser.py:1529, in Parser.raise_error(self, message, token)
1517 error = ParseError.new(
1518 f"{message}. Line {token.line}, Col: {token.col}.\n"
1519 f" {start_context}\033[4m{highlight}\033[0m{end_context}",
(...)
1525 end_context=end_context,
1526 )
1528 if self.error_level == ErrorLevel.IMMEDIATE:
-> 1529 raise error
1531 self.errors.append(error)
ParseError: Expecting ). Line 8, Col: 17.
manager_staff_id INTEGER not null
unique
references staff
on update cascade
)
```
| tobymao/sqlglot | diff --git a/tests/dialects/test_sqlite.py b/tests/dialects/test_sqlite.py
index 230c0e8f..e37cdc8e 100644
--- a/tests/dialects/test_sqlite.py
+++ b/tests/dialects/test_sqlite.py
@@ -222,3 +222,7 @@ class TestSQLite(Validator):
"mysql": "CREATE TABLE `x` (`Name` VARCHAR(200) NOT NULL)",
},
)
+
+ self.validate_identity(
+ "CREATE TABLE store (store_id INTEGER PRIMARY KEY AUTOINCREMENT, mgr_id INTEGER NOT NULL UNIQUE REFERENCES staff ON UPDATE CASCADE)"
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 25.27 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.4.3
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@559e7bc5bbc77e94dea6de0470659b3c3fa6851f#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- distlib==0.3.9
- duckdb==1.2.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.4.3
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_sqlite.py::TestSQLite::test_ddl"
] | [] | [
"tests/dialects/test_sqlite.py::TestSQLite::test_datediff",
"tests/dialects/test_sqlite.py::TestSQLite::test_hexadecimal_literal",
"tests/dialects/test_sqlite.py::TestSQLite::test_longvarchar_dtype",
"tests/dialects/test_sqlite.py::TestSQLite::test_sqlite",
"tests/dialects/test_sqlite.py::TestSQLite::test_strftime",
"tests/dialects/test_sqlite.py::TestSQLite::test_warnings",
"tests/dialects/test_sqlite.py::TestSQLite::test_window_null_treatment"
] | [] | MIT License | 20,057 | 211 | [
"sqlglot/dialects/sqlite.py"
] |
joshy__striprtf-54 | e68ead6986a4312648e6126a1263167bf371b5d6 | 2024-10-25 11:39:25 | e68ead6986a4312648e6126a1263167bf371b5d6 | diff --git a/striprtf/striprtf.py b/striprtf/striprtf.py
index fe3bf4a..1ea79b8 100644
--- a/striprtf/striprtf.py
+++ b/striprtf/striprtf.py
@@ -1,11 +1,11 @@
import re
import codecs
+
"""
Taken from https://gist.github.com/gilsondev/7c1d2d753ddb522e7bc22511cfb08676
and modified for better output of tables.
"""
-
# fmt: off
# control words which specify a "destination".
destinations = frozenset((
@@ -16,7 +16,7 @@ destinations = frozenset((
'do','doccomm','docvar','dptxbxtext','ebcend','ebcstart','factoidname','falt',
'fchars','ffdeftext','ffentrymcr','ffexitmcr','ffformat','ffhelptext','ffl',
'ffname','ffstattext','file','filetbl','fldinst','fldtype',
- 'fname','fontemb','fontfile','fonttbl','footer','footerf','footerl','footerr',
+ 'fname','fontemb','fontfile','footer','footerf','footerl','footerr',
'footnote','formfield','ftncn','ftnsep','ftnsepc','g','generator','gridtbl',
'header','headerf','headerl','headerr','hl','hlfr','hlinkbase','hlloc','hlsrc',
'hsv','htmltag','info','keycode','keywords','latentstyles','lchars','levelnumbers',
@@ -49,15 +49,50 @@ destinations = frozenset((
'wgrffmtfilter','windowcaption','writereservation','writereservhash','xe','xform',
'xmlattrname','xmlattrvalue','xmlclose','xmlname','xmlnstbl',
'xmlopen',
- ))
+))
# fmt: on
-
+charset_map = {
+ 0: 'cp1252', # Default
+ 42: 'cp1252', # Symbol
+ 77: 'mac_roman', # Mac Roman
+ 78: 'mac_japanese', # Mac Japanese
+ 79: 'mac_chinesetrad', # Mac Traditional Chinese
+ 80: 'mac_korean', # Mac Korean
+ 81: 'mac_arabic', # Mac Arabic
+ 82: 'mac_hebrew', # Mac Hebrew
+ 83: 'mac_greek', # Mac Greek
+ 84: 'mac_cyrillic', # Mac Cyrillic
+ 85: 'mac_chinesesimp', # Mac Simplified Chinese
+ 86: 'mac_rumanian', # Mac Romanian
+ 87: 'mac_ukrainian', # Mac Ukrainian
+ 88: 'mac_thai', # Mac Thai
+ 89: 'mac_ce', # Mac Central European
+ 128: 'cp932', # Japanese
+ 129: 'cp949', # Korean
+ 130: 'cp1361', # Johab (Korean)
+ 134: 'cp936', # Simplified Chinese (GBK)
+ 136: 'cp950', # Traditional Chinese (Big5)
+ 161: 'cp1253', # Greek
+ 162: 'cp1254', # Turkish
+ 163: 'cp1258', # Vietnamese
+ 177: 'cp1255', # Hebrew
+ 178: 'cp1256', # Arabic
+ 186: 'cp1257', # Baltic
+ 204: 'cp1251', # Cyrillic
+ 222: 'cp874', # Thai
+ 238: 'cp1250', # Eastern European
+ 254: 'cp437', # OEM United States
+ 255: 'cp850', # OEM Multilingual Latin 1
+ }
# Translation of some special characters.
-specialchars = {
+# and section characters reset formatting
+sectionchars = {
"par": "\n",
"sect": "\n\n",
- "page": "\n\n",
+ "page": "\n\n"
+}
+specialchars = {
"line": "\n",
"tab": "\t",
"emdash": "\u2014",
@@ -82,7 +117,7 @@ specialchars = {
"-": "\xad",
"_": "\u2011"
-}
+} | sectionchars
PATTERN = re.compile(
r"\\([a-z]{1,32})(-?\d{1,10})?[ ]?|\\'([0-9a-f]{2})|\\([^a-z])|([{}])|[\r\n]+|(.)",
@@ -94,7 +129,8 @@ HYPERLINKS = re.compile(
re.IGNORECASE
)
-
+
+
def rtf_to_text(text, encoding="cp1252", errors="strict"):
""" Converts the rtf text to plain text.
@@ -103,7 +139,7 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
text : str
The rtf text
encoding : str
- Input encoding which is ignored if the rtf file contains an explicit codepage directive,
+ Input encoding which is ignored if the rtf file contains an explicit codepage directive,
as it is typically the case. Defaults to `cp1252` encoding as it the most commonly used.
errors : str
How to handle encoding errors. Default is "strict", which throws an error. Another
@@ -114,9 +150,13 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
str
the converted rtf text as a python unicode string
"""
- text = re.sub(HYPERLINKS, "\\1(\\2)", text) # captures links like link_text(http://link_dest)
+ text = re.sub(HYPERLINKS, "\\1(\\2)", text) # captures links like link_text(http://link_dest)
stack = []
+ fonttbl = {}
+ default_font = None
+ current_font = None
ignorable = False # Whether this group (and all inside it) are "ignorable".
+ suppress_output = False # Whether this group (and all inside it) are "ignorable".
ucskip = 1 # Number of ASCII characters to skip after a unicode character.
curskip = 0 # Number of ASCII characters left to skip
hexes = None
@@ -125,17 +165,17 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
for match in PATTERN.finditer(text):
word, arg, _hex, char, brace, tchar = match.groups()
if hexes and not _hex:
- out += bytes.fromhex(hexes).decode(encoding=encoding, errors=errors)
+ out += bytes.fromhex(hexes).decode(encoding=fonttbl.get(current_font, {'encoding': encoding}).get('encoding'), errors=errors)
hexes = None
if brace:
curskip = 0
if brace == "{":
# Push state
- stack.append((ucskip, ignorable))
+ stack.append((ucskip, ignorable, suppress_output))
elif brace == "}":
# Pop state
if stack:
- ucskip, ignorable = stack.pop()
+ ucskip, ignorable, suppress_output = stack.pop()
# sample_3.rtf throws an IndexError because of stack being empty.
# don't know right now how this could happen, so for now this is
# a ugly hack to prevent it
@@ -145,8 +185,10 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
elif char: # \x (not a letter)
curskip = 0
if char in specialchars:
+ if char in sectionchars:
+ current_font = default_font
if not ignorable:
- out += specialchars[char]
+ out += specialchars[char]
elif char == "*":
ignorable = True
elif word: # \foo
@@ -176,6 +218,20 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
c += 0x10000
out += chr(c)
curskip = ucskip
+ elif word == "f":
+ current_font = arg
+ if current_font not in fonttbl:
+ fonttbl[current_font] = {}
+ elif word == "fonttbl":
+ fonttbl = {}
+ suppress_output = True
+ elif word == "fcharset":
+ fonttbl[current_font]['charset'] = arg
+ fonttbl[current_font]['encoding'] = charset_map.get(int(arg), encoding)
+ ignorable = True
+ elif word == "deff":
+ default_font = arg
+
elif _hex: # \'xx
if curskip > 0:
curskip -= 1
@@ -188,6 +244,10 @@ def rtf_to_text(text, encoding="cp1252", errors="strict"):
elif tchar:
if curskip > 0:
curskip -= 1
- elif not ignorable:
+ elif not ignorable and not suppress_output:
out += tchar
+ print(fonttbl)
+
+
+
return out
| fonttbl support: Fonts might enforce different encoding per section
This is the rtf I have.
```
{\rtf1\fbidis\ansi\ansicpg1252\deff0\nouicompat\deflang1033{\fonttbl{\f0\fnil\fcharset0 Microsoft Sans Serif;}{\f1\fswiss\fcharset134 Microsoft YaHei;}{\f2\fnil Microsoft Sans Serif;}}
{\*\generator Riched20 10.0.19041}\viewkind4\uc1
\pard\ltrpar\f0\fs17 [HEADER]\par
[/HEADER]\par
[BODY]\par
[1]\par
00:00:44:22\par
00:00:48:05\par
\f1\'b8\'f9\'be\'dd\'d5\'e6\'ca\'b5\'b9\'ca\'ca\'c2\'b4\'b4\'d7\'f7\f0\par
```
fonttbl is in the destination list and ignored.
todo: read font table and switch encoding if necessary. | joshy/striprtf | diff --git a/tests/test_issue_53.py b/tests/test_issue_53.py
new file mode 100644
index 0000000..637ab20
--- /dev/null
+++ b/tests/test_issue_53.py
@@ -0,0 +1,17 @@
+import unittest
+from pathlib import Path
+
+from striprtf.striprtf import rtf_to_text
+
+RTF = r"""{\rtf1\fbidis\ansi\ansicpg1252\deff0\nouicompat\deflang1033{\fonttbl{\f0\fnil\fcharset0 Microsoft Sans Serif;}{\f1\fswiss\fcharset134 Microsoft YaHei;}{\f2\fnil Microsoft Sans Serif;}}
+{\*\generator Riched20 10.0.19041}\viewkind4\uc1
+\pard\ltrpar\f0\fs17
+00:00:44:22\par
+00:00:48:05\par
+\f1\'b8\'f9\'be\'dd\'d5\'e6\'ca\'b5\'b9\'ca\'ca\'c2\'b4\'b4\'d7\'f7\f0\par"""
+
+
+class Table(unittest.TestCase):
+ def test_parse_w_fontbl(self):
+ result = rtf_to_text(RTF)
+ self.assertEqual("00:00:44:22\n00:00:48:05\n根据真实故事创作\n", result)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
-e git+https://github.com/joshy/striprtf.git@e68ead6986a4312648e6126a1263167bf371b5d6#egg=striprtf
tomli==2.2.1
| name: striprtf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/striprtf
| [
"tests/test_issue_53.py::Table::test_parse_w_fontbl"
] | [] | [] | [] | BSD 3-Clause "New" or "Revised" License | 20,058 | 2,324 | [
"striprtf/striprtf.py"
] |
|
yt-project__unyt-531 | 55f1ac47b7adb967c14981029633bd5d16d1a7aa | 2024-10-25 13:56:11 | 41b4b3614fd3bf61f07be60d405fc8a8e4781194 | diff --git a/unyt/_array_functions.py b/unyt/_array_functions.py
index 4b22dd2..646629e 100644
--- a/unyt/_array_functions.py
+++ b/unyt/_array_functions.py
@@ -358,72 +358,72 @@ def block(arrays):
@implements(np.fft.fft)
def ftt_fft(a, *args, **kwargs):
- return np.fft.fft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.fft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.fft2)
def ftt_fft2(a, *args, **kwargs):
- return np.fft.fft2._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.fft2._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.fftn)
def ftt_fftn(a, *args, **kwargs):
- return np.fft.fftn._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.fftn._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.hfft)
def ftt_hfft(a, *args, **kwargs):
- return np.fft.hfft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.hfft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.rfft)
def ftt_rfft(a, *args, **kwargs):
- return np.fft.rfft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.rfft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.rfft2)
def ftt_rfft2(a, *args, **kwargs):
- return np.fft.rfft2._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.rfft2._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.rfftn)
def ftt_rfftn(a, *args, **kwargs):
- return np.fft.rfftn._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.rfftn._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.ifft)
def ftt_ifft(a, *args, **kwargs):
- return np.fft.ifft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.ifft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.ifft2)
def ftt_ifft2(a, *args, **kwargs):
- return np.fft.ifft2._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.ifft2._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.ifftn)
def ftt_ifftn(a, *args, **kwargs):
- return np.fft.ifftn._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.ifftn._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.ihfft)
def ftt_ihfft(a, *args, **kwargs):
- return np.fft.ihfft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.ihfft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.irfft)
def ftt_irfft(a, *args, **kwargs):
- return np.fft.irfft._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.irfft._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.irfft2)
def ftt_irfft2(a, *args, **kwargs):
- return np.fft.irfft2._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.irfft2._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.irfftn)
def ftt_irfftn(a, *args, **kwargs):
- return np.fft.irfftn._implementation(np.asarray(a), *args, **kwargs) / a.units
+ return np.fft.irfftn._implementation(np.asarray(a), *args, **kwargs) * a.units
@implements(np.fft.fftshift)
| unyt processes units for FFTs incorrectly
* unyt version: 3.0.3
* Python version: 3.12
* Operating System: MacOS
### Description
When using a fast Fourier transform, I expect the units that are returned to be the same as the original function. Dimensionally, the Fourier transform is a decomposition of the form:
$$
f(x) ~= \int F(k) e^{-ikx} \cdot dk
$$
However, when using the `np.fft` functions on `unyt` arrays, they return units that are the inverse of the original array.
For example, the minimal working example below:
```python
import matplotlib.pyplot as plt
import unyt
import numpy as np
# Dirac Delta
x = unyt.unyt_array(np.linspace(-5, 5, 128), "s", name="Time")
y = unyt.unyt_array(np.zeros_like(x.v), "K", name="Temperature")
y[len(y) // 2] = 1
# FFT that thing
fft = np.fft.fft(y)
amps = np.sqrt((fft * fft.conj()).real)
dk = 2.0 * np.pi / (10.0 * unyt.s)
k = np.fft.fftfreq(len(x), d=1.0 / dk) * len(x)
k.name = "Wavenumber $k$"
amps.name = "FFT of Temperature"
# Plot it up
with unyt.matplotlib_support:
fig, (axreal, axfft) = plt.subplots(1, 2, figsize=(6, 2.5))
axreal.plot(x, y)
axfft.plot(k, amps)
fig.tight_layout()
plt.show()
```
This produces the following figure:
<img width="879" alt="Screenshot 2024-10-25 at 8 20 19 AM" src="https://github.com/user-attachments/assets/f424a07f-2a50-41b7-996e-164781532950">
| yt-project/unyt | diff --git a/unyt/tests/test_array_functions.py b/unyt/tests/test_array_functions.py
index 4903e93..bfd900a 100644
--- a/unyt/tests/test_array_functions.py
+++ b/unyt/tests/test_array_functions.py
@@ -923,7 +923,7 @@ def test_fft_1D(func):
x1 = [0, 1, 2] * cm
res = func(x1)
assert type(res) is unyt_array
- assert res.units == (1 / cm).units
+ assert res.units == (1 * cm).units
@pytest.mark.parametrize(
@@ -943,7 +943,7 @@ def test_fft_ND(func):
x1 = [[0, 1, 2], [0, 1, 2], [0, 1, 2]] * cm
res = func(x1)
assert type(res) is unyt_array
- assert res.units == (1 / cm).units
+ assert res.units == (1 * cm).units
@pytest.mark.parametrize("func", [np.fft.fftshift, np.fft.ifftshift])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-doctestplus"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
charset-normalizer==3.4.1
contourpy==1.3.0
cycler==0.12.1
docutils==0.21.2
exceptiongroup==1.2.2
fonttools==4.56.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
kiwisolver==1.4.7
MarkupSafe==3.0.2
matplotlib==3.9.4
mpmath==1.3.0
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
Pygments==2.19.1
pyparsing==3.2.3
pytest==8.3.5
pytest-doctestplus==1.4.0
python-dateutil==2.9.0.post0
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sympy==1.13.3
tomli==2.2.1
-e git+https://github.com/yt-project/unyt.git@55f1ac47b7adb967c14981029633bd5d16d1a7aa#egg=unyt
urllib3==2.3.0
zipp==3.21.0
| name: unyt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- contourpy==1.3.0
- cycler==0.12.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- fonttools==4.56.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- kiwisolver==1.4.7
- markupsafe==3.0.2
- matplotlib==3.9.4
- mpmath==1.3.0
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pygments==2.19.1
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-doctestplus==1.4.0
- python-dateutil==2.9.0.post0
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sympy==1.13.3
- tomli==2.2.1
- unyt==3.0.3.post20+g55f1ac4
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/unyt
| [
"unyt/tests/test_array_functions.py::test_fft_1D[fft]",
"unyt/tests/test_array_functions.py::test_fft_1D[hfft]",
"unyt/tests/test_array_functions.py::test_fft_1D[rfft]",
"unyt/tests/test_array_functions.py::test_fft_1D[ifft]",
"unyt/tests/test_array_functions.py::test_fft_1D[ihfft]",
"unyt/tests/test_array_functions.py::test_fft_1D[irfft]",
"unyt/tests/test_array_functions.py::test_fft_ND[fft2]",
"unyt/tests/test_array_functions.py::test_fft_ND[fftn]",
"unyt/tests/test_array_functions.py::test_fft_ND[rfft2]",
"unyt/tests/test_array_functions.py::test_fft_ND[rfftn]",
"unyt/tests/test_array_functions.py::test_fft_ND[ifft2]",
"unyt/tests/test_array_functions.py::test_fft_ND[ifftn]",
"unyt/tests/test_array_functions.py::test_fft_ND[irfft2]",
"unyt/tests/test_array_functions.py::test_fft_ND[irfftn]"
] | [] | [
"unyt/tests/test_array_functions.py::test_wrapping_completeness",
"unyt/tests/test_array_functions.py::test_unit_validation[arrays0]",
"unyt/tests/test_array_functions.py::test_unit_validation[arrays1]",
"unyt/tests/test_array_functions.py::test_unit_validation[arrays2]",
"unyt/tests/test_array_functions.py::test_unit_validation_dimensionless_factors",
"unyt/tests/test_array_functions.py::test_array_repr",
"unyt/tests/test_array_functions.py::test_dot_vectors",
"unyt/tests/test_array_functions.py::test_dot_matrices[None]",
"unyt/tests/test_array_functions.py::test_dot_matrices[pure",
"unyt/tests/test_array_functions.py::test_dot_matrices[same",
"unyt/tests/test_array_functions.py::test_dot_matrices[convertible",
"unyt/tests/test_array_functions.py::test_dot_mixed_ndarray_unyt_array",
"unyt/tests/test_array_functions.py::test_invalid_dot_matrices",
"unyt/tests/test_array_functions.py::test_vdot",
"unyt/tests/test_array_functions.py::test_inner",
"unyt/tests/test_array_functions.py::test_outer",
"unyt/tests/test_array_functions.py::test_kron",
"unyt/tests/test_array_functions.py::test_linalg_inv",
"unyt/tests/test_array_functions.py::test_linalg_tensorinv",
"unyt/tests/test_array_functions.py::test_linalg_pinv",
"unyt/tests/test_array_functions.py::test_linalg_diagonal",
"unyt/tests/test_array_functions.py::test_linalg_trace",
"unyt/tests/test_array_functions.py::test_linalg_outer",
"unyt/tests/test_array_functions.py::test_linalg_cross",
"unyt/tests/test_array_functions.py::test_linalg_matmul",
"unyt/tests/test_array_functions.py::test_linalg_matrix_norm",
"unyt/tests/test_array_functions.py::test_matrix_transpose[None]",
"unyt/tests/test_array_functions.py::test_matrix_transpose[linalg]",
"unyt/tests/test_array_functions.py::test_vecdot[None]",
"unyt/tests/test_array_functions.py::test_vecdot[linalg]",
"unyt/tests/test_array_functions.py::test_linalg_vector_norm",
"unyt/tests/test_array_functions.py::test_linalg_svd",
"unyt/tests/test_array_functions.py::test_linalg_svdvals",
"unyt/tests/test_array_functions.py::test_linalg_tensordot",
"unyt/tests/test_array_functions.py::test_histogram",
"unyt/tests/test_array_functions.py::test_histogram_implicit_units",
"unyt/tests/test_array_functions.py::test_histogram2d",
"unyt/tests/test_array_functions.py::test_histogramdd",
"unyt/tests/test_array_functions.py::test_histogram_bin_edges",
"unyt/tests/test_array_functions.py::test_concatenate",
"unyt/tests/test_array_functions.py::test_concatenate_different_units",
"unyt/tests/test_array_functions.py::test_cross",
"unyt/tests/test_array_functions.py::test_intersect1d",
"unyt/tests/test_array_functions.py::test_intersect1d_return_indices",
"unyt/tests/test_array_functions.py::test_union1d",
"unyt/tests/test_array_functions.py::test_linalg_norm",
"unyt/tests/test_array_functions.py::test_xstack[vstack]",
"unyt/tests/test_array_functions.py::test_xstack[hstack]",
"unyt/tests/test_array_functions.py::test_xstack[dstack]",
"unyt/tests/test_array_functions.py::test_xstack[column_stack]",
"unyt/tests/test_array_functions.py::test_stack[0-expected0]",
"unyt/tests/test_array_functions.py::test_stack[1-expected1]",
"unyt/tests/test_array_functions.py::test_amax",
"unyt/tests/test_array_functions.py::test_amin",
"unyt/tests/test_array_functions.py::test_around",
"unyt/tests/test_array_functions.py::test_atleast_nd",
"unyt/tests/test_array_functions.py::test_average",
"unyt/tests/test_array_functions.py::test_trim_zeros",
"unyt/tests/test_array_functions.py::test_any",
"unyt/tests/test_array_functions.py::test_append",
"unyt/tests/test_array_functions.py::test_append_inconsistent_units",
"unyt/tests/test_array_functions.py::test_block",
"unyt/tests/test_array_functions.py::test_block_units_inconsistency",
"unyt/tests/test_array_functions.py::test_can_cast",
"unyt/tests/test_array_functions.py::test_isreal_like",
"unyt/tests/test_array_functions.py::test_fft_shift[fftshift]",
"unyt/tests/test_array_functions.py::test_fft_shift[ifftshift]",
"unyt/tests/test_array_functions.py::test_trapezoid_no_x",
"unyt/tests/test_array_functions.py::test_trapezoid_with_raw_x",
"unyt/tests/test_array_functions.py::test_trapezoid_with_unit_x",
"unyt/tests/test_array_functions.py::test_trapezoid_with_raw_dx",
"unyt/tests/test_array_functions.py::test_trapezoid_with_unit_dx",
"unyt/tests/test_array_functions.py::test_scalar_reduction[min]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[max]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[mean]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[median]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[sum]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[nanmin]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[nanmax]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[nanmean]",
"unyt/tests/test_array_functions.py::test_scalar_reduction[nanmedian]",
"unyt/tests/test_array_functions.py::test_sort[sort]",
"unyt/tests/test_array_functions.py::test_sort[sort_complex]",
"unyt/tests/test_array_functions.py::test_repeat",
"unyt/tests/test_array_functions.py::test_tile",
"unyt/tests/test_array_functions.py::test_shares_memory",
"unyt/tests/test_array_functions.py::test_nonzero",
"unyt/tests/test_array_functions.py::test_isinf",
"unyt/tests/test_array_functions.py::test_allclose",
"unyt/tests/test_array_functions.py::test_isclose[a0-b0-expected0]",
"unyt/tests/test_array_functions.py::test_isclose[a1-b1-expected1]",
"unyt/tests/test_array_functions.py::test_isclose[a2-b2-expected2]",
"unyt/tests/test_array_functions.py::test_isclose_error",
"unyt/tests/test_array_functions.py::test_xspace[linspace]",
"unyt/tests/test_array_functions.py::test_xspace[logspace]",
"unyt/tests/test_array_functions.py::test_xspace[geomspace]",
"unyt/tests/test_array_functions.py::test_full_like",
"unyt/tests/test_array_functions.py::test_x_like[empty_like]",
"unyt/tests/test_array_functions.py::test_x_like[zeros_like]",
"unyt/tests/test_array_functions.py::test_x_like[ones_like]",
"unyt/tests/test_array_functions.py::test_copy",
"unyt/tests/test_array_functions.py::test_copy_subok",
"unyt/tests/test_array_functions.py::test_copyto",
"unyt/tests/test_array_functions.py::test_copyto_edge_cases",
"unyt/tests/test_array_functions.py::test_astype",
"unyt/tests/test_array_functions.py::test_meshgrid",
"unyt/tests/test_array_functions.py::test_reshaper[transpose-args0-kwargs0]",
"unyt/tests/test_array_functions.py::test_reshaper[reshape-args1-kwargs1]",
"unyt/tests/test_array_functions.py::test_reshaper[resize-args2-kwargs2]",
"unyt/tests/test_array_functions.py::test_reshaper[expand_dims-args3-kwargs3]",
"unyt/tests/test_array_functions.py::test_reshaper[squeeze-args4-kwargs4]",
"unyt/tests/test_array_functions.py::test_reshaper[swapaxes-args5-kwargs5]",
"unyt/tests/test_array_functions.py::test_reshaper[moveaxis-args6-kwargs6]",
"unyt/tests/test_array_functions.py::test_reshaper[rot90-args7-kwargs7]",
"unyt/tests/test_array_functions.py::test_reshaper[roll-args8-kwargs8]",
"unyt/tests/test_array_functions.py::test_reshaper[rollaxis-args9-kwargs9]",
"unyt/tests/test_array_functions.py::test_reshaper[flip-args10-kwargs10]",
"unyt/tests/test_array_functions.py::test_reshaper[fliplr-args11-kwargs11]",
"unyt/tests/test_array_functions.py::test_reshaper[flipud-args12-kwargs12]",
"unyt/tests/test_array_functions.py::test_reshaper[broadcast_to-args13-kwargs13]",
"unyt/tests/test_array_functions.py::test_reshaper[delete-args14-kwargs14]",
"unyt/tests/test_array_functions.py::test_reshaper[partition-args15-kwargs15]",
"unyt/tests/test_array_functions.py::test_broadcast_arrays",
"unyt/tests/test_array_functions.py::test_xsplit[split-args0]",
"unyt/tests/test_array_functions.py::test_xsplit[dsplit-args1]",
"unyt/tests/test_array_functions.py::test_xsplit[hsplit-args2]",
"unyt/tests/test_array_functions.py::test_xsplit[vsplit-args3]",
"unyt/tests/test_array_functions.py::test_xsplit[array_split-args4]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[prod-expected_units0]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[var-expected_units1]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[std-expected_units2]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[nanprod-expected_units3]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[nansum-expected_units4]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[nanvar-expected_units5]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[nanstd-expected_units6]",
"unyt/tests/test_array_functions.py::test_scalar_reducer[trace-expected_units7]",
"unyt/tests/test_array_functions.py::test_percentile[percentile]",
"unyt/tests/test_array_functions.py::test_percentile[quantile]",
"unyt/tests/test_array_functions.py::test_percentile[nanpercentile]",
"unyt/tests/test_array_functions.py::test_percentile[nanquantile]",
"unyt/tests/test_array_functions.py::test_diagx[diag]",
"unyt/tests/test_array_functions.py::test_diagx[diagflat]",
"unyt/tests/test_array_functions.py::test_diagx[diagonal]",
"unyt/tests/test_array_functions.py::test_fix",
"unyt/tests/test_array_functions.py::test_linalg_matrix_power",
"unyt/tests/test_array_functions.py::test_linalg_det",
"unyt/tests/test_array_functions.py::test_linalg_cholesky",
"unyt/tests/test_array_functions.py::test_linalg_lstsq",
"unyt/tests/test_array_functions.py::test_linalg_multi_dot",
"unyt/tests/test_array_functions.py::test_linalg_qr",
"unyt/tests/test_array_functions.py::test_linalg_solve[solve]",
"unyt/tests/test_array_functions.py::test_linalg_solve[tensorsolve]",
"unyt/tests/test_array_functions.py::test_linalg_cond",
"unyt/tests/test_array_functions.py::test_eig[eig]",
"unyt/tests/test_array_functions.py::test_eig[eigh]",
"unyt/tests/test_array_functions.py::test_eigvals[eigvals]",
"unyt/tests/test_array_functions.py::test_eigvals[eigvalsh]",
"unyt/tests/test_array_functions.py::test_savetxt",
"unyt/tests/test_array_functions.py::test_apply_along_axis",
"unyt/tests/test_array_functions.py::test_apply_over_axes",
"unyt/tests/test_array_functions.py::test_array_equal",
"unyt/tests/test_array_functions.py::test_array_equiv",
"unyt/tests/test_array_functions.py::test_common_type",
"unyt/tests/test_array_functions.py::test_result_type",
"unyt/tests/test_array_functions.py::test_deltas[input_units0-output_units0-diff]",
"unyt/tests/test_array_functions.py::test_deltas[input_units0-output_units0-ediff1d]",
"unyt/tests/test_array_functions.py::test_deltas[input_units0-output_units0-gradient]",
"unyt/tests/test_array_functions.py::test_deltas[input_units0-output_units0-ptp]",
"unyt/tests/test_array_functions.py::test_deltas[input_units1-output_units1-diff]",
"unyt/tests/test_array_functions.py::test_deltas[input_units1-output_units1-ediff1d]",
"unyt/tests/test_array_functions.py::test_deltas[input_units1-output_units1-gradient]",
"unyt/tests/test_array_functions.py::test_deltas[input_units1-output_units1-ptp]",
"unyt/tests/test_array_functions.py::test_cumsum[cumsum]",
"unyt/tests/test_array_functions.py::test_cumsum[nancumsum]",
"unyt/tests/test_array_functions.py::test_cumprod[cumprod]",
"unyt/tests/test_array_functions.py::test_cumprod[nancumprod]",
"unyt/tests/test_array_functions.py::test_bincount",
"unyt/tests/test_array_functions.py::test_unique",
"unyt/tests/test_array_functions.py::test_unique_all",
"unyt/tests/test_array_functions.py::test_unique_counts",
"unyt/tests/test_array_functions.py::test_unique_inverse",
"unyt/tests/test_array_functions.py::test_unique_values",
"unyt/tests/test_array_functions.py::test_take",
"unyt/tests/test_array_functions.py::test_pad",
"unyt/tests/test_array_functions.py::test_sinc",
"unyt/tests/test_array_functions.py::test_choose_mixed_units",
"unyt/tests/test_array_functions.py::test_choose",
"unyt/tests/test_array_functions.py::test_extract",
"unyt/tests/test_array_functions.py::test_fill_diagonal_mixed_units",
"unyt/tests/test_array_functions.py::test_fill_diagonal[val0]",
"unyt/tests/test_array_functions.py::test_fill_diagonal[1]",
"unyt/tests/test_array_functions.py::test_insert_mixed_units",
"unyt/tests/test_array_functions.py::test_insert[42]",
"unyt/tests/test_array_functions.py::test_insert[val1]",
"unyt/tests/test_array_functions.py::test_isin_mixed_units",
"unyt/tests/test_array_functions.py::test_isin",
"unyt/tests/test_array_functions.py::test_in1d_mixed_units",
"unyt/tests/test_array_functions.py::test_in1d",
"unyt/tests/test_array_functions.py::test_place_mixed_units",
"unyt/tests/test_array_functions.py::test_place",
"unyt/tests/test_array_functions.py::test_put_mixed_units",
"unyt/tests/test_array_functions.py::test_put",
"unyt/tests/test_array_functions.py::test_put_along_axis_mixed_units",
"unyt/tests/test_array_functions.py::test_put_along_axis",
"unyt/tests/test_array_functions.py::test_putmask_mixed_units",
"unyt/tests/test_array_functions.py::test_putmask",
"unyt/tests/test_array_functions.py::test_searchsorted_mixed_units",
"unyt/tests/test_array_functions.py::test_searchsorted[val0]",
"unyt/tests/test_array_functions.py::test_searchsorted[3]",
"unyt/tests/test_array_functions.py::test_select_mixed_units[choicelist_gen0-default0]",
"unyt/tests/test_array_functions.py::test_select_mixed_units[choicelist_gen1-34]",
"unyt/tests/test_array_functions.py::test_select",
"unyt/tests/test_array_functions.py::test_setdiff1d_mixed_units",
"unyt/tests/test_array_functions.py::test_setdiff1d",
"unyt/tests/test_array_functions.py::test_setxor1d_mixed_units",
"unyt/tests/test_array_functions.py::test_setxor1d",
"unyt/tests/test_array_functions.py::test_clip_mixed_units",
"unyt/tests/test_array_functions.py::test_clip[vmin0-vmax0]",
"unyt/tests/test_array_functions.py::test_clip[3-4]",
"unyt/tests/test_array_functions.py::test_where_mixed_units",
"unyt/tests/test_array_functions.py::test_where_single_arg",
"unyt/tests/test_array_functions.py::test_where_xy",
"unyt/tests/test_array_functions.py::test_complex_reductions[imag]",
"unyt/tests/test_array_functions.py::test_complex_reductions[real]",
"unyt/tests/test_array_functions.py::test_complex_reductions[real_if_close]",
"unyt/tests/test_array_functions.py::test_triangles[tril]",
"unyt/tests/test_array_functions.py::test_triangles[triu]",
"unyt/tests/test_array_functions.py::test_einsum",
"unyt/tests/test_array_functions.py::test_convolve",
"unyt/tests/test_array_functions.py::test_correlate",
"unyt/tests/test_array_functions.py::test_tensordot",
"unyt/tests/test_array_functions.py::test_compress",
"unyt/tests/test_array_functions.py::test_take_along_axis",
"unyt/tests/test_array_functions.py::test_unwrap",
"unyt/tests/test_array_functions.py::test_interp",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[allclose-allclose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[apply_over_axes-apply_over_axes]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[around-around]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[array2string-array2string]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[array_equal-array_equal]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[array_equiv-array_equiv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[array_repr-array_repr]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[block-block]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[choose-choose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[clip-clip]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[column_stack-column_stack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[convolve-convolve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[correlate-correlate]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[cross-cross]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[cumprod-cumprod]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[det-linalg_det]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[diff-diff]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[dstack-dstack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ediff1d-ediff1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[eig-linalg_eig]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[eigh-linalg_eigh]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[eigvals-linalg_eigvals]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[eigvalsh-linalg_eigvalsh]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[einsum-einsum]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[fft-ftt_fft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[fft2-ftt_fft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[fftn-ftt_fftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[fftshift-fft_fftshift]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[fill_diagonal-fill_diagonal]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[geomspace-geomspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[hfft-ftt_hfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[histogram-histogram]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[histogram2d-histogram2d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[histogram_bin_edges-histogram_bin_edges]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[histogramdd-histogramdd]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[hstack-hstack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ifft-ftt_ifft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ifft2-ftt_ifft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ifftn-ftt_ifftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ifftshift-fft_ifftshift]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ihfft-ftt_ihfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[in1d-in1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[insert-insert]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[interp-interp]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[intersect1d-intersect1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[inv-linalg_inv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[irfft-ftt_irfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[irfft2-ftt_irfft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[irfftn-ftt_irfftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[isclose-isclose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[isin-isin]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[kron-kron]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[linspace-linspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[logspace-logspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[lstsq-linalg_lstsq]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[nanpercentile-nanpercentile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[nanquantile-nanquantile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[norm-norm]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[outer-outer]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[outer-linalg_outer]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[pad-pad]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[percentile-percentile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[pinv-linalg_pinv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[place-place]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[prod-prod]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[ptp-ptp]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[put-put]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[put_along_axis-put_along_axis]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[quantile-quantile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[rfft-ftt_rfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[rfft2-ftt_rfft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[rfftn-ftt_rfftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[savetxt-savetxt]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[searchsorted-searchsorted]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[select-select]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[setdiff1d-setdiff1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[sinc-sinc]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[solve-linalg_solve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[sort_complex-sort_complex]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[stack-stack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[svd-linalg_svd]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[tensordot-tensordot]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[tensorinv-linalg_tensorinv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[tensorsolve-linalg_tensorsolve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[trace-trace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[trapezoid-trapezoid]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[tril-tril]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[triu-triu]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[union1d-union1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[unwrap-unwrap]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[var-var]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_all_arguments_reexposed[vstack-vstack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[allclose-allclose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[apply_over_axes-apply_over_axes]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[around-around]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[array2string-array2string]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[array_equal-array_equal]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[array_equiv-array_equiv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[array_repr-array_repr]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[block-block]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[choose-choose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[clip-clip]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[column_stack-column_stack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[convolve-convolve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[correlate-correlate]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[cross-cross]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[cumprod-cumprod]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[det-linalg_det]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[diff-diff]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[dstack-dstack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ediff1d-ediff1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[eig-linalg_eig]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[eigh-linalg_eigh]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[eigvals-linalg_eigvals]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[eigvalsh-linalg_eigvalsh]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[einsum-einsum]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[fft-ftt_fft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[fft2-ftt_fft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[fftn-ftt_fftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[fftshift-fft_fftshift]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[fill_diagonal-fill_diagonal]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[geomspace-geomspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[hfft-ftt_hfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[histogram-histogram]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[histogram2d-histogram2d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[histogram_bin_edges-histogram_bin_edges]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[histogramdd-histogramdd]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[hstack-hstack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ifft-ftt_ifft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ifft2-ftt_ifft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ifftn-ftt_ifftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ifftshift-fft_ifftshift]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ihfft-ftt_ihfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[in1d-in1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[insert-insert]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[interp-interp]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[intersect1d-intersect1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[inv-linalg_inv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[irfft-ftt_irfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[irfft2-ftt_irfft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[irfftn-ftt_irfftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[isclose-isclose]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[isin-isin]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[kron-kron]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[linspace-linspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[logspace-logspace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[lstsq-linalg_lstsq]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[nanpercentile-nanpercentile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[nanquantile-nanquantile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[norm-norm]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[outer-outer]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[outer-linalg_outer]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[pad-pad]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[percentile-percentile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[pinv-linalg_pinv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[place-place]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[prod-prod]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[ptp-ptp]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[put-put]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[put_along_axis-put_along_axis]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[quantile-quantile]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[rfft-ftt_rfft]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[rfft2-ftt_rfft2]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[rfftn-ftt_rfftn]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[savetxt-savetxt]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[searchsorted-searchsorted]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[select-select]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[setdiff1d-setdiff1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[sinc-sinc]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[solve-linalg_solve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[sort_complex-sort_complex]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[stack-stack]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[svd-linalg_svd]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[tensordot-tensordot]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[tensorinv-linalg_tensorinv]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[tensorsolve-linalg_tensorsolve]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[trace-trace]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[trapezoid-trapezoid]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[tril-tril]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[triu-triu]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[union1d-union1d]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[unwrap-unwrap]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[var-var]",
"unyt/tests/test_array_functions.py::TestFunctionHelpersSignatureCompatibility::test_known_arguments[vstack-vstack]"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,062 | 1,051 | [
"unyt/_array_functions.py"
] |
|
reframe-hpc__reframe-3293 | 93705431c921d8377117114239b3b61c0a2c7f84 | 2024-10-25 16:02:57 | 925be11f6c01c0f035f9bc8ce9ac5ce3daff2049 | diff --git a/reframe/core/decorators.py b/reframe/core/decorators.py
index b8fbc7e2..ff62aa5e 100644
--- a/reframe/core/decorators.py
+++ b/reframe/core/decorators.py
@@ -9,7 +9,6 @@
__all__ = ['simple_test']
-
import inspect
import sys
import traceback
@@ -171,10 +170,10 @@ def _validate_test(cls):
if (cls.is_abstract()):
getlogger().warning(
- f'skipping test {cls.__qualname__!r}: '
- f'test has one or more undefined parameters'
+ f'skipping test {cls.__qualname__!r}: ' +
+ 'the following parameters are undefined: ' +
+ ', '.join(cls.param_space.undefined_params())
)
- return False
conditions = [VersionValidator(v) for v in cls._rfm_required_version]
if (cls._rfm_required_version and
diff --git a/reframe/core/fixtures.py b/reframe/core/fixtures.py
index e2854679..3e790d3f 100644
--- a/reframe/core/fixtures.py
+++ b/reframe/core/fixtures.py
@@ -325,12 +325,6 @@ class FixtureRegistry:
'''Get the uninstantiated tests of this registry'''
return self._registry.keys()
- def _filter_valid_partitions(self, candidate_parts):
- return [p for p in candidate_parts if p in self._env_by_part]
-
- def _filter_valid_environs(self, part, candidate_environs):
- return [e for e in cadidate_environs if e in self._env_by_part[part]]
-
def _is_registry(self, other):
if not isinstance(other, FixtureRegistry):
raise TypeError('other is not a FixtureRegistry')
@@ -776,7 +770,8 @@ class TestFixture:
# Check that the fixture class is not an abstract test.
if cls.is_abstract():
raise ValueError(
- f'class {cls.__qualname__!r} has undefined parameters'
+ f'fixture {cls.__qualname__!r} has undefined parameters: ' +
+ ', '.join(cls.param_space.undefined_params())
)
# Validate the scope
diff --git a/reframe/core/parameters.py b/reframe/core/parameters.py
index 16335419..f829b20e 100644
--- a/reframe/core/parameters.py
+++ b/reframe/core/parameters.py
@@ -198,7 +198,7 @@ class TestParam:
self.values = tuple(filt_vals) + self.values
except TypeError:
raise ReframeSyntaxError(
- f"'filter_param' must return an iterable"
+ "'filter_param' must return an iterable"
) from None
def is_abstract(self):
@@ -307,7 +307,7 @@ class ParamSpace(namespaces.Namespace):
try:
# Get the parameter values for the specified variant
param_values = self.__param_combinations[params_index]
- except IndexError as no_params:
+ except IndexError:
raise RuntimeError(
f'parameter space index out of range for '
f'{obj.__class__.__qualname__}'
@@ -333,6 +333,11 @@ class ParamSpace(namespaces.Namespace):
'''
return name in self.params and not self.params[name].is_abstract()
+ def undefined_params(self):
+ '''Return a list of all undefined parameters.'''
+ return [name for name, param in self.params.items()
+ if param.is_abstract()]
+
def __iter__(self):
'''Create a generator object to iterate over the parameter space
| ReFrame detects the test has undefined parameters, but doesn't say which ones
I sometimes get
```
WARNING: skipping test 'X': test has one or more undefined parameters
```
And recently got the same question from a user who was trying to run one of my tests. I found https://github.com/reframe-hpc/reframe/issues/2240 which is related, and read the answer, but... I don't fully understand the issue here. Wouldn't a piece of code like:
```
if (cls.is_abstract()):
params = cls.param_space.params
undefined_params = []
for param in params:
if not cls.param_space.defines(param):
undefined_params.append(param)
# Raise more detailed warning
getlogger().warning(
f'skipping test {cls.__qualname__!r}: '
f'test has one or more undefined parameters:'
", ".join(undefined_params)
)
```
Replacing [this segment](https://github.com/reframe-hpc/reframe/blob/1bef008fb2f561795aa2b64f56074f0baa80bda3/reframe/core/decorators.py#L173) of code do the trick?
I've seen the argumentation in #2240 that tracing it back to the line might be difficult, and that's fine by me. But naming _which_ parameter is undefined can already be a big help. Sure, users would still have to see where that parameter is defined, and why it is possible undefined, but they'd have a clear starting point for the search :)
Disclaimer: I haven't _actually_ tested the code above, but it seems like this is how it _should_ work (famous last words :)) | reframe-hpc/reframe | diff --git a/unittests/test_parameters.py b/unittests/test_parameters.py
index 118bba99..a818a3d8 100644
--- a/unittests/test_parameters.py
+++ b/unittests/test_parameters.py
@@ -10,6 +10,7 @@ import inspect
import reframe as rfm
from reframe.core.exceptions import ReframeSyntaxError
+import reframe.core.decorators as decorators
class NoParams(rfm.RunOnlyRegressionTest):
@@ -48,10 +49,13 @@ def test_params_are_present():
def test_abstract_param():
class MyTest(Abstract):
- pass
+ # Add another abstract parameter
+ P2 = parameter()
assert MyTest.param_space['P0'] == ()
assert MyTest.param_space['P1'] == ('b',)
+ assert MyTest.param_space['P2'] == ()
+ assert MyTest.param_space.undefined_params() == ['P0', 'P2']
def test_param_override():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 4.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | archspec==0.2.5
argcomplete==3.6.1
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
filelock==3.18.0
idna==3.10
iniconfig==2.1.0
jsonschema==3.2.0
lxml==5.3.0
packaging==24.2
pluggy==1.5.0
pyrsistent==0.20.0
pytest==8.3.5
pytest-parallel==0.1.1
PyYAML==6.0.2
-e git+https://github.com/reframe-hpc/reframe.git@93705431c921d8377117114239b3b61c0a2c7f84#egg=ReFrame_HPC
requests==2.32.3
semver==3.0.4
six==1.17.0
tabulate==0.9.0
tblib==3.0.0
tomli==2.2.1
urllib3==2.3.0
wcwidth==0.2.13
| name: reframe
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- archspec==0.2.5
- argcomplete==3.6.1
- attrs==25.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- idna==3.10
- iniconfig==2.1.0
- jsonschema==3.2.0
- lxml==5.3.0
- packaging==24.2
- pluggy==1.5.0
- pyrsistent==0.20.0
- pytest==8.3.5
- pytest-parallel==0.1.1
- pyyaml==6.0.2
- reframe-hpc==4.7.0.dev10
- requests==2.32.3
- semver==3.0.4
- six==1.17.0
- tabulate==0.9.0
- tblib==3.0.0
- tomli==2.2.1
- urllib3==2.3.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/reframe
| [
"unittests/test_parameters.py::test_abstract_param"
] | [
"unittests/test_parameters.py::test_instantiate_abstract_test",
"unittests/test_parameters.py::test_param_values_are_not_set",
"unittests/test_parameters.py::test_consume_param_space",
"unittests/test_parameters.py::test_inject_params_wrong_index",
"unittests/test_parameters.py::test_simple_test_decorator",
"unittests/test_parameters.py::test_param_deepcopy"
] | [
"unittests/test_parameters.py::test_param_space_is_empty",
"unittests/test_parameters.py::test_params_are_present",
"unittests/test_parameters.py::test_param_override",
"unittests/test_parameters.py::test_param_inheritance",
"unittests/test_parameters.py::test_filter_params",
"unittests/test_parameters.py::test_wrong_filter",
"unittests/test_parameters.py::test_is_abstract_test",
"unittests/test_parameters.py::test_is_not_abstract_test",
"unittests/test_parameters.py::test_param_len_is_zero",
"unittests/test_parameters.py::test_extended_param_len",
"unittests/test_parameters.py::test_param_space_clash",
"unittests/test_parameters.py::test_multiple_inheritance",
"unittests/test_parameters.py::test_namespace_clash",
"unittests/test_parameters.py::test_double_declare",
"unittests/test_parameters.py::test_overwrite_param",
"unittests/test_parameters.py::test_param_access",
"unittests/test_parameters.py::test_param_space_read_only",
"unittests/test_parameters.py::test_parameter_override",
"unittests/test_parameters.py::test_override_regular_attribute",
"unittests/test_parameters.py::test_override_parameter",
"unittests/test_parameters.py::test_local_paramspace_is_empty",
"unittests/test_parameters.py::test_class_attr_access",
"unittests/test_parameters.py::test_get_variant_nums"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,065 | 860 | [
"reframe/core/decorators.py",
"reframe/core/fixtures.py",
"reframe/core/parameters.py"
] |
|
bvanelli__actualpy-91 | 2c89c523db6e579915ab07c6ffd375659e85c506 | 2024-10-26 11:28:49 | 2c89c523db6e579915ab07c6ffd375659e85c506 | codecov[bot]: ## [Codecov](https://app.codecov.io/gh/bvanelli/actualpy/pull/91?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Brunno+Vanelli) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 97.69%. Comparing base [(`2c89c52`)](https://app.codecov.io/gh/bvanelli/actualpy/commit/2c89c523db6e579915ab07c6ffd375659e85c506?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Brunno+Vanelli) to head [(`475beef`)](https://app.codecov.io/gh/bvanelli/actualpy/commit/475beef5a34fdc962e5236160b7803ae27fa1dd7?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Brunno+Vanelli).
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #91 +/- ##
==========================================
+ Coverage 97.67% 97.69% +0.01%
==========================================
Files 16 16
Lines 2196 2213 +17
==========================================
+ Hits 2145 2162 +17
Misses 51 51
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/bvanelli/actualpy/pull/91?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Brunno+Vanelli).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Brunno+Vanelli).
| diff --git a/actual/database.py b/actual/database.py
index a131f91..2847a55 100644
--- a/actual/database.py
+++ b/actual/database.py
@@ -13,7 +13,7 @@ protobuf change message using [actual.database.BaseModel.convert][].
import datetime
import decimal
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union
from sqlalchemy import MetaData, Table, engine, event, inspect
from sqlalchemy.dialects.sqlite import insert
@@ -669,6 +669,31 @@ class ZeroBudgets(BaseModel, table=True):
def get_amount(self) -> decimal.Decimal:
return decimal.Decimal(self.amount) / decimal.Decimal(100)
+ @property
+ def range(self) -> Tuple[datetime.date, datetime.date]:
+ """Range of the budget as a tuple [start, end). The end date is not inclusive, as it represents the start of the
+ next month."""
+ budget_start = self.get_date().replace(day=1)
+ # conversion taken from https://stackoverflow.com/a/59199379/12681470
+ budget_end = (budget_start + datetime.timedelta(days=32)).replace(day=1)
+ return budget_start, budget_end
+
+ @property
+ def balance(self) -> decimal.Decimal:
+ """Returns the current balance of the budget. The evaluation will take into account the budget month and
+ only selected transactions for the combination month and category. Deleted transactions are ignored."""
+ budget_start, budget_end = (int(datetime.date.strftime(d, "%Y%m%d")) for d in self.range)
+ value = object_session(self).scalar(
+ select(func.coalesce(func.sum(Transactions.amount), 0)).where(
+ Transactions.category_id == self.category_id,
+ Transactions.date >= budget_start,
+ Transactions.date < budget_end,
+ Transactions.is_parent == 0,
+ Transactions.tombstone == 0,
+ )
+ )
+ return decimal.Decimal(value) / 100
+
class PendingTransactions(SQLModel, table=True):
__tablename__ = "pending_transactions"
diff --git a/actual/queries.py b/actual/queries.py
index 9960497..6e62d78 100644
--- a/actual/queries.py
+++ b/actual/queries.py
@@ -5,6 +5,7 @@ import decimal
import json
import typing
import uuid
+import warnings
import sqlalchemy
from pydantic import TypeAdapter
@@ -78,6 +79,7 @@ def get_transactions(
account: Accounts | str | None = None,
is_parent: bool = False,
include_deleted: bool = False,
+ budget: ZeroBudgets | None = None,
) -> typing.Sequence[Transactions]:
"""
Returns a list of all available transactions, sorted by date in descending order.
@@ -92,12 +94,28 @@ def get_transactions(
single transactions or the main transaction with `Transactions.splits` property. Default is to return all individual
splits, and the parent can be retrieved by `Transactions.parent`.
:param include_deleted: includes deleted transactions from the search.
+ :param budget: optional budget filter for the transactions. The budget range and category will be used to filter the
+ final results. **Usually not used together with the `start_date` and `end_date` filters, as they
+ might hide results.
:return: list of transactions with `account`, `category` and `payee` preloaded.
"""
query = _transactions_base_query(s, start_date, end_date, account, include_deleted)
query = query.filter(Transactions.is_parent == int(is_parent))
if notes:
query = query.filter(Transactions.notes.ilike(f"%{sqlalchemy.text(notes).compile()}%"))
+ if budget:
+ budget_start, budget_end = budget.range
+ if (start_date and start_date >= budget_end) or (end_date and end_date < budget_start):
+ warnings.warn(
+ f"Provided date filters [{start_date}, {end_date}) to get_transactions are outside the bounds of the "
+ f"budget range [{budget_start}, {budget_end}). Results might be empty!"
+ )
+ budget_start, budget_end = (int(datetime.date.strftime(d, "%Y%m%d")) for d in budget.range)
+ query = query.filter(
+ Transactions.date >= budget_start,
+ Transactions.date < budget_end,
+ Transactions.category_id == budget.category_id,
+ )
return s.exec(query).all()
| Category from budgets should be scoped to the filtered month
### Description
I'd think something like
```python
budget = get_budget(actual.session, datetime.date.today(), 'Online Shopping')
LOGGER.debug("%s", budget.category.balance)
```
Would only include transactions from the filtered month instead of every transaction | bvanelli/actualpy | diff --git a/tests/test_database.py b/tests/test_database.py
index 3608bd5..685a294 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -192,6 +192,7 @@ def test_rule_insertion_method(session):
def test_budgets(session):
# insert a budget
category = get_or_create_category(session, "Expenses")
+ unrelated_category = get_or_create_category(session, "Unrelated")
session.commit()
create_budget(session, date(2024, 10, 7), category, 10.0)
assert len(get_budgets(session)) == 1
@@ -204,9 +205,26 @@ def test_budgets(session):
# get a budget that already exists, but re-set it
create_budget(session, date(2024, 10, 7), category, 20.0)
assert budget.get_amount() == 20.0
+ assert budget.range == (date(2024, 10, 1), date(2024, 11, 1))
+ # insert a transaction in the range and see if they are counted on the balance
+ bank = create_account(session, "Bank")
+ t1 = create_transaction(session, date(2024, 10, 1), bank, category=category, amount=-10.0)
+ t2 = create_transaction(session, date(2024, 10, 15), bank, category=category, amount=-10.0)
+ t3 = create_transaction(session, date(2024, 10, 31), bank, category=category, amount=-15.0)
+ # should not be counted
+ create_transaction(session, date(2024, 10, 1), bank, category=category, amount=-15.0).delete()
+ create_transaction(session, date(2024, 11, 1), bank, category=category, amount=-20.0)
+ create_transaction(session, date(2024, 10, 15), bank, category=unrelated_category, amount=-20.0)
+ assert budget.balance == -35.0
+ budget_transactions = get_transactions(session, budget=budget)
+ assert len(budget_transactions) == 3
+ assert all(t in budget_transactions for t in (t1, t2, t3))
# test if it fails if category does not exist
with pytest.raises(ActualError, match="Category is provided but does not exist"):
get_budgets(session, category="foo")
+ # filtering by budget will raise a warning if get_transactions with budget also provides a start-end outside range
+ with pytest.warns(match="Provided date filters"):
+ get_transactions(session, date(2024, 9, 1), date(2024, 9, 15), budget=budget)
def test_normalize_payee():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/bvanelli/actualpy.git@2c89c523db6e579915ab07c6ffd375659e85c506#egg=actualpy
annotated-types==0.7.0
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
distlib==0.3.9
docker==7.1.0
exceptiongroup==1.2.2
filelock==3.18.0
greenlet==3.1.1
identify==2.6.9
idna==3.10
iniconfig==2.1.0
markdown-it-py==3.0.0
mdurl==0.1.2
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
proto-plus==1.26.1
protobuf==6.30.2
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
PyYAML==6.0.2
requests==2.31.0
rich==14.0.0
shellingham==1.5.4
six==1.17.0
SQLAlchemy==2.0.40
sqlmodel==0.0.18
testcontainers==4.9.2
tomli==2.2.1
typer==0.15.2
typing-inspection==0.4.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
wrapt==1.17.2
| name: actualpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- annotated-types==0.7.0
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- distlib==0.3.9
- docker==7.1.0
- exceptiongroup==1.2.2
- filelock==3.18.0
- greenlet==3.1.1
- identify==2.6.9
- idna==3.10
- iniconfig==2.1.0
- markdown-it-py==3.0.0
- mdurl==0.1.2
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- proto-plus==1.26.1
- protobuf==6.30.2
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- pyyaml==6.0.2
- requests==2.31.0
- rich==14.0.0
- shellingham==1.5.4
- six==1.17.0
- sqlalchemy==2.0.40
- sqlmodel==0.0.18
- testcontainers==4.9.2
- tomli==2.2.1
- typer==0.15.2
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- urllib3==2.3.0
- virtualenv==20.29.3
- wrapt==1.17.2
prefix: /opt/conda/envs/actualpy
| [
"tests/test_database.py::test_budgets"
] | [] | [
"tests/test_database.py::test_account_relationships",
"tests/test_database.py::test_transaction",
"tests/test_database.py::test_transaction_without_payee",
"tests/test_database.py::test_reconcile_transaction",
"tests/test_database.py::test_create_splits",
"tests/test_database.py::test_create_splits_error",
"tests/test_database.py::test_create_transaction_without_account_error",
"tests/test_database.py::test_rule_insertion_method",
"tests/test_database.py::test_normalize_payee",
"tests/test_database.py::test_rollback",
"tests/test_database.py::test_model_notes",
"tests/test_database.py::test_default_imported_payee",
"tests/test_database.py::test_session_error",
"tests/test_database.py::test_apply_changes"
] | [] | MIT License | 20,071 | 1,050 | [
"actual/database.py",
"actual/queries.py"
] |
statsmodels__statsmodels-9413 | 0838f111df35d73fdd921ed3007099b56928ce4f | 2024-10-28 18:24:41 | 831e26ca5dfb087658500f08236a30b99b3f4501 | diff --git a/statsmodels/tsa/vector_ar/var_model.py b/statsmodels/tsa/vector_ar/var_model.py
index 9adb66784..648a47bbf 100644
--- a/statsmodels/tsa/vector_ar/var_model.py
+++ b/statsmodels/tsa/vector_ar/var_model.py
@@ -5,6 +5,7 @@ References
----------
Lütkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
+
from __future__ import annotations
from statsmodels.compat.python import lrange
@@ -226,12 +227,13 @@ def forecast(y, coefs, trend_coefs, steps, exog=None):
-----
Lütkepohl p. 37
"""
- p = len(coefs)
- k = len(coefs[0])
+ coefs = np.asarray(coefs)
+ if coefs.ndim != 3:
+ raise ValueError("coefs must be an array with 3 dimensions")
+ p, k = coefs.shape[:2]
if y.shape[0] < p:
raise ValueError(
- f"y must by have at least order ({p}) observations. "
- f"Got {y.shape[0]}."
+ f"y must by have at least order ({p}) observations. " f"Got {y.shape[0]}."
)
# initial value
forcs = np.zeros((steps, k))
@@ -286,9 +288,7 @@ def _forecast_vars(steps, ma_coefs, sig_u):
return covs[:, inds, inds]
-def forecast_interval(
- y, coefs, trend_coefs, sig_u, steps=5, alpha=0.05, exog=1
-):
+def forecast_interval(y, coefs, trend_coefs, sig_u, steps=5, alpha=0.05, exog=1):
assert 0 < alpha < 1
q = util.norm_signif_level(alpha)
@@ -362,9 +362,7 @@ def _reordered(self, order):
params_new_inc[0, i] = params[0, i]
endog_lagged_new[:, 0] = endog_lagged[:, 0]
for j in range(k_ar):
- params_new_inc[i + j * num_end + k, :] = self.params[
- c + j * num_end + k, :
- ]
+ params_new_inc[i + j * num_end + k, :] = self.params[c + j * num_end + k, :]
endog_lagged_new[:, i + j * num_end + k] = endog_lagged[
:, c + j * num_end + k
]
@@ -444,8 +442,8 @@ def test_normality(results, signif=0.05):
Pinv = np.linalg.inv(np.linalg.cholesky(sig))
w = np.dot(Pinv, resid_c.T)
- b1 = (w ** 3).sum(1)[:, None] / results.nobs
- b2 = (w ** 4).sum(1)[:, None] / results.nobs - 3
+ b1 = (w**3).sum(1)[:, None] / results.nobs
+ b2 = (w**4).sum(1)[:, None] / results.nobs - 3
lam_skew = results.nobs * np.dot(b1.T, b1) / 6
lam_kurt = results.nobs * np.dot(b2.T, b2) / 24
@@ -544,9 +542,7 @@ class VAR(TimeSeriesModel):
y = deprecated_alias("y", "endog", remove_version="0.11.0")
- def __init__(
- self, endog, exog=None, dates=None, freq=None, missing="none"
- ):
+ def __init__(self, endog, exog=None, dates=None, freq=None, missing="none"):
super().__init__(endog, exog, dates, freq, missing=missing)
if self.endog.ndim == 1:
raise ValueError("Only gave one variable to VAR")
@@ -658,8 +654,7 @@ class VAR(TimeSeriesModel):
selections = self.select_order(maxlags=maxlags)
if not hasattr(selections, ic):
raise ValueError(
- "%s not recognized, must be among %s"
- % (ic, sorted(selections))
+ "%s not recognized, must be among %s" % (ic, sorted(selections))
)
lags = getattr(selections, ic)
if verbose:
@@ -680,13 +675,9 @@ class VAR(TimeSeriesModel):
if orig_exog_names:
x_names_to_add = orig_exog_names
else:
- x_names_to_add = [
- ("exog%d" % i) for i in range(self.exog.shape[1])
- ]
+ x_names_to_add = [("exog%d" % i) for i in range(self.exog.shape[1])]
self.data.xnames = (
- self.data.xnames[:k_trend]
- + x_names_to_add
- + self.data.xnames[k_trend:]
+ self.data.xnames[:k_trend] + x_names_to_add + self.data.xnames[k_trend:]
)
self.data.cov_names = pd.MultiIndex.from_product(
(self.data.xnames, self.data.ynames)
@@ -716,9 +707,7 @@ class VAR(TimeSeriesModel):
if exog is not None:
# TODO: currently only deterministic terms supported (exoglags==0)
# and since exoglags==0, x will be an array of size 0.
- x = util.get_var_endog(
- exog[-nobs:], 0, trend="n", has_constant="raise"
- )
+ x = util.get_var_endog(exog[-nobs:], 0, trend="n", has_constant="raise")
x_inst = exog[-nobs:]
x = np.column_stack((x, x_inst))
del x_inst # free memory
@@ -823,16 +812,12 @@ class VAR(TimeSeriesModel):
for k, v in result.info_criteria.items():
ics[k].append(v)
- selected_orders = {
- k: np.array(v).argmin() + p_min for k, v in ics.items()
- }
+ selected_orders = {k: np.array(v).argmin() + p_min for k, v in ics.items()}
return LagOrderResults(ics, selected_orders, vecm=False)
@classmethod
- def from_formula(
- cls, formula, data, subset=None, drop_cols=None, *args, **kwargs
- ):
+ def from_formula(cls, formula, data, subset=None, drop_cols=None, *args, **kwargs):
"""
Not implemented. Formulas are not supported for VAR models.
"""
@@ -860,9 +845,7 @@ class VARProcess:
trend.
"""
- def __init__(
- self, coefs, coefs_exog, sigma_u, names=None, _params_info=None
- ):
+ def __init__(self, coefs, coefs_exog, sigma_u, names=None, _params_info=None):
self.k_ar = len(coefs)
self.neqs = coefs.shape[1]
self.coefs = coefs
@@ -920,7 +903,9 @@ class VARProcess:
"""
return is_stable(self.coefs, verbose=verbose)
- def simulate_var(self, steps=None, offset=None, seed=None, initial_values=None, nsimulations=None):
+ def simulate_var(
+ self, steps=None, offset=None, seed=None, initial_values=None, nsimulations=None
+ ):
"""
simulate the VAR(p) process for the desired number of steps
@@ -963,9 +948,7 @@ class VARProcess:
# if more than intercept
# endog_lagged contains all regressors, trend, exog_user
# and lagged endog, trimmed initial observations
- offset = self.endog_lagged[:, : self.k_exog].dot(
- self.coefs_exog.T
- )
+ offset = self.endog_lagged[:, : self.k_exog].dot(self.coefs_exog.T)
steps_ = self.endog_lagged.shape[0]
else:
offset = self.intercept
@@ -992,7 +975,7 @@ class VARProcess:
steps=steps,
seed=seed,
initial_values=initial_values,
- nsimulations=nsimulations
+ nsimulations=nsimulations,
)
return y
@@ -1111,9 +1094,7 @@ class VARProcess:
def plot_acorr(self, nlags=10, linewidth=8):
"""Plot theoretical autocorrelation function"""
- fig = plotting.plot_full_acorr(
- self.acorr(nlags=nlags), linewidth=linewidth
- )
+ fig = plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
return fig
def forecast(self, y, steps, exog_future=None):
@@ -1135,18 +1116,14 @@ class VARProcess:
"""
if self.exog is None and exog_future is not None:
raise ValueError(
- "No exog in model, so no exog_future supported "
- "in forecast method."
+ "No exog in model, so no exog_future supported " "in forecast method."
)
if self.exog is not None and exog_future is None:
raise ValueError(
- "Please provide an exog_future argument to "
- "the forecast method."
+ "Please provide an exog_future argument to " "the forecast method."
)
- exog_future = array_like(
- exog_future, "exog_future", optional=True, ndim=2
- )
+ exog_future = array_like(exog_future, "exog_future", optional=True, ndim=2)
if exog_future is not None:
if exog_future.shape[0] != steps:
err_msg = f"""\
@@ -1159,13 +1136,11 @@ steps ({steps}) observations.
exogs = []
if self.trend.startswith("c"): # constant term
exogs.append(np.ones(steps))
- exog_lin_trend = np.arange(
- self.n_totobs + 1, self.n_totobs + 1 + steps
- )
+ exog_lin_trend = np.arange(self.n_totobs + 1, self.n_totobs + 1 + steps)
if "t" in self.trend:
exogs.append(exog_lin_trend)
if "tt" in self.trend:
- exogs.append(exog_lin_trend ** 2)
+ exogs.append(exog_lin_trend**2)
if exog_future is not None:
exogs.append(exog_future)
@@ -1384,9 +1359,7 @@ class VARResults(VARProcess):
def plot(self):
"""Plot input time series"""
- return plotting.plot_mts(
- self.endog, names=self.names, index=self.dates
- )
+ return plotting.plot_mts(self.endog, names=self.names, index=self.dates)
@property
def df_model(self):
@@ -1662,7 +1635,7 @@ class VARResults(VARProcess):
warnings.warn(
"forecast cov takes parameter uncertainty into" "account",
OutputWarning,
- stacklevel = 2,
+ stacklevel=2,
)
else:
raise ValueError("method has to be either 'mse' or 'auto'")
@@ -1766,9 +1739,7 @@ class VARResults(VARProcess):
def fill_coll(sim):
ret = VAR(sim, exog=self.exog).fit(maxlags=k_ar, trend=self.trend)
- ret = (
- ret.orth_ma_rep(maxn=steps) if orth else ret.ma_rep(maxn=steps)
- )
+ ret = ret.orth_ma_rep(maxn=steps) if orth else ret.ma_rep(maxn=steps)
return ret.cumsum(axis=0) if cum else ret
for i in range(repl):
@@ -1989,13 +1960,13 @@ class VARResults(VARProcess):
num_det_terms = self.k_exog
# Make restriction matrix
- C = np.zeros((num_restr, k * num_det_terms + k ** 2 * p), dtype=float)
+ C = np.zeros((num_restr, k * num_det_terms + k**2 * p), dtype=float)
cols_det = k * num_det_terms
row = 0
for j in range(p):
for ing_ind in causing_ind:
for ed_ind in caused_ind:
- C[row, cols_det + ed_ind + k * ing_ind + k ** 2 * j] = 1
+ C[row, cols_det + ed_ind + k * ing_ind + k**2 * j] = 1
row += 1
# Lütkepohl 3.6.5
@@ -2111,7 +2082,7 @@ class VARResults(VARProcess):
caused = [self.names[c] for c in caused_ind]
# Note: JMulTi seems to be using k_ar+1 instead of k_ar
- k, t, p = self.neqs, self.nobs, self.k_ar
+ k, t = self.neqs, self.nobs
num_restr = len(causing) * len(caused) # called N in Lütkepohl
@@ -2198,8 +2169,8 @@ class VARResults(VARProcess):
if adjusted:
to_add /= self.nobs - t
statistic += to_add
- statistic *= self.nobs ** 2 if adjusted else self.nobs
- df = self.neqs ** 2 * (nlags - self.k_ar)
+ statistic *= self.nobs**2 if adjusted else self.nobs
+ df = self.neqs**2 * (nlags - self.k_ar)
dist = stats.chi2(df)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
@@ -2284,7 +2255,7 @@ class VARResults(VARProcess):
nobs = self.nobs
neqs = self.neqs
lag_order = self.k_ar
- free_params = lag_order * neqs ** 2 + neqs * self.k_exog
+ free_params = lag_order * neqs**2 + neqs * self.k_exog
if self.df_resid:
ld = logdet_symm(self.sigma_u_mle)
else:
@@ -2355,13 +2326,9 @@ class VARResultsWrapper(wrap.ResultsWrapper):
"sigma_u_mle": "cov_eq",
"stderr": "columns_eq",
}
- _wrap_attrs = wrap.union_dicts(
- TimeSeriesResultsWrapper._wrap_attrs, _attrs
- )
+ _wrap_attrs = wrap.union_dicts(TimeSeriesResultsWrapper._wrap_attrs, _attrs)
_methods = {"conf_int": "multivariate_confint"}
- _wrap_methods = wrap.union_dicts(
- TimeSeriesResultsWrapper._wrap_methods, _methods
- )
+ _wrap_methods = wrap.union_dicts(TimeSeriesResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(VARResultsWrapper, VARResults) # noqa:E305
| Cannot forecast with VAR when lags is zero.
#### Describe the bug
Fitting and forecasting with a VAR model throws an error when the information criterion determines that the best number of lags is zero (e.g. for uniform random data).
#### Code Sample, a copy-pastable example if possible
```python
import numpy as np
from statsmodels.tsa.api import VAR
Y = np.random.rand(300, 2)
results = VAR(Y).fit(maxlags=1, ic='aic', trend="c")
results.forecast(Y, steps=5)
```
**Error**
<details>
IndexError
Traceback (most recent call last)
[<ipython-input-81-0d77b71b9e4c>](https://localhost:8080/#) in <cell line: 6>()
4 Y = np.random.rand(300, 2)
5 results = VAR(Y).fit(maxlags=1, ic='aic', trend="c")
----> 6 results.forecast(Y, steps=5)
[/usr/local/lib/python3.10/dist-packages/statsmodels/tsa/vector_ar/var_model.py](https://localhost:8080/#) in forecast(self, y, steps, exog_future)
1174 else:
1175 exog_future = np.column_stack(exogs)
-> 1176 return forecast(y, self.coefs, trend_coefs, steps, exog_future)
1177
1178 # TODO: use `mse` module-level function?
[/usr/local/lib/python3.10/dist-packages/statsmodels/tsa/vector_ar/var_model.py](https://localhost:8080/#) in forecast(y, coefs, trend_coefs, steps, exog)
228 """
229 p = len(coefs)
--> 230 k = len(coefs[0])
231 if y.shape[0] < p:
232 raise ValueError(
IndexError: index 0 is out of bounds for axis 0 with size 0
</details>
#### Expected Output
I expected it to forecast a constant value. I think there is a problem when the VarResults initializer constructs `results.coefs` because when lags=0 it will be an empty array. This is fine for statistical summary but I don't think the forecaster knows how to handle the constant prediction case.
#### Output of ``import statsmodels.api as sm; sm.show_versions()``
<details>
[paste the output of ``import statsmodels.api as sm; sm.show_versions()`` here below this line]
INSTALLED VERSIONS
------------------
Python: 3.10.12.final.0
OS: Linux 6.1.85+ #1 SMP PREEMPT_DYNAMIC Thu Jun 27 21:05:47 UTC 2024 x86_64
byteorder: little
LC_ALL: en_US.UTF-8
LANG: en_US.UTF-8
statsmodels
===========
Installed: 0.14.4 (/usr/local/lib/python3.10/dist-packages/statsmodels)
Required Dependencies
=====================
cython: 3.0.11 (/usr/local/lib/python3.10/dist-packages/Cython)
numpy: 1.26.4 (/usr/local/lib/python3.10/dist-packages/numpy)
scipy: 1.13.1 (/usr/local/lib/python3.10/dist-packages/scipy)
pandas: 2.2.2 (/usr/local/lib/python3.10/dist-packages/pandas)
dateutil: 2.8.2 (/usr/local/lib/python3.10/dist-packages/dateutil)
patsy: 0.5.6 (/usr/local/lib/python3.10/dist-packages/patsy)
Optional Dependencies
=====================
matplotlib: 3.7.1 (/usr/local/lib/python3.10/dist-packages/matplotlib)
backend: module://matplotlib_inline.backend_inline
cvxopt: 1.3.2 (/usr/local/lib/python3.10/dist-packages/cvxopt)
joblib: 1.4.2 (/usr/local/lib/python3.10/dist-packages/joblib)
Developer Tools
================
IPython: 7.34.0 (/usr/local/lib/python3.10/dist-packages/IPython)
jinja2: 3.1.4 (/usr/local/lib/python3.10/dist-packages/jinja2)
sphinx: 5.0.2 (/usr/local/lib/python3.10/dist-packages/sphinx)
pygments: 2.18.0 (/usr/local/lib/python3.10/dist-packages/pygments)
pytest: 7.4.4 (/usr/local/lib/python3.10/dist-packages/pytest)
virtualenv: Not installed
</details>
| statsmodels/statsmodels | diff --git a/statsmodels/tsa/vector_ar/tests/test_var.py b/statsmodels/tsa/vector_ar/tests/test_var.py
index e37027967..38ac4e21c 100644
--- a/statsmodels/tsa/vector_ar/tests/test_var.py
+++ b/statsmodels/tsa/vector_ar/tests/test_var.py
@@ -1,6 +1,7 @@
"""
Test VAR Model
"""
+
from statsmodels.compat.pandas import QUARTER_END, assert_index_equal
from statsmodels.compat.python import lrange
@@ -19,7 +20,7 @@ import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import ValueWarning
from statsmodels.tsa.base.datetools import dates_from_str
import statsmodels.tsa.vector_ar.util as util
-from statsmodels.tsa.vector_ar.var_model import VAR, var_acf
+from statsmodels.tsa.vector_ar.var_model import VAR, forecast, var_acf
DECIMAL_12 = 12
DECIMAL_6 = 6
@@ -142,12 +143,8 @@ class RResults:
data = var_results.__dict__
self.names = data["coefs"].dtype.names
- self.params = data["coefs"].view(
- (float, len(self.names)), type=np.ndarray
- )
- self.stderr = data["stderr"].view(
- (float, len(self.names)), type=np.ndarray
- )
+ self.params = data["coefs"].view((float, len(self.names)), type=np.ndarray)
+ self.stderr = data["stderr"].view((float, len(self.names)), type=np.ndarray)
self.irf = data["irf"].item()
self.orth_irf = data["orthirf"].item()
@@ -223,9 +220,7 @@ class CheckIRF:
@pytest.mark.matplotlib
def test_plot_figsizes(self):
assert_equal(self.irf.plot().get_size_inches(), (10, 10))
- assert_equal(
- self.irf.plot(figsize=(14, 10)).get_size_inches(), (14, 10)
- )
+ assert_equal(self.irf.plot(figsize=(14, 10)).get_size_inches(), (14, 10))
assert_equal(self.irf.plot_cum_effects().get_size_inches(), (10, 10))
assert_equal(
@@ -251,16 +246,11 @@ class CheckFEVD:
def test_fevd_summary(self):
self.fevd.summary()
- @pytest.mark.xfail(
- reason="FEVD.cov() is not implemented",
- raises=NotImplementedError,
- strict=True,
- )
def test_fevd_cov(self):
# test does not crash
# not implemented
- covs = self.fevd.cov()
- raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ self.fevd.cov()
class TestVARResults(CheckIRF, CheckFEVD):
@@ -285,7 +275,7 @@ class TestVARResults(CheckIRF, CheckFEVD):
# make sure this works with no names
ndarr = self.data.view((float, 3), type=np.ndarray)
model = VAR(ndarr)
- res = model.fit(self.p)
+ model.fit(self.p)
def test_names(self):
assert_equal(self.model.endog_names, self.ref.names)
@@ -309,8 +299,8 @@ class TestVARResults(CheckIRF, CheckFEVD):
@pytest.mark.smoke
def test_repr(self):
# just want this to work
- foo = str(self.res)
- bar = repr(self.res)
+ str(self.res)
+ repr(self.res)
def test_params(self):
assert_almost_equal(self.res.params, self.ref.params, DECIMAL_3)
@@ -335,6 +325,7 @@ class TestVARResults(CheckIRF, CheckFEVD):
@pytest.mark.smoke
def test_summary(self):
summ = self.res.summary()
+ assert "Summary of " in str(summ)
def test_detsig(self):
assert_almost_equal(self.res.detomega, self.ref.detomega)
@@ -355,7 +346,8 @@ class TestVARResults(CheckIRF, CheckFEVD):
ics = ["aic", "fpe", "hqic", "bic"]
for ic in ics:
- res = self.model.fit(maxlags=10, ic=ic, verbose=True)
+ # Smoke test
+ self.model.fit(maxlags=10, ic=ic, verbose=True)
with pytest.raises(Exception):
self.model.fit(ic="foo")
@@ -407,7 +399,9 @@ class TestVARResults(CheckIRF, CheckFEVD):
@pytest.mark.smoke
def test_select_order(self):
result = self.model.fit(10, ic="aic", verbose=True)
+ assert isinstance(result.params, np.ndarray)
result = self.model.fit(10, ic="fpe", verbose=True)
+ assert isinstance(result.params, np.ndarray)
# bug
model = VAR(self.model.endog)
@@ -448,6 +442,7 @@ class TestVARResults(CheckIRF, CheckFEVD):
@pytest.mark.smoke
def test_acorr(self):
acorrs = self.res.acorr(10)
+ assert acorrs.shape == (11, 3, 3)
@pytest.mark.smoke
def test_forecast(self):
@@ -622,20 +617,17 @@ class TestVARResultsLutkepohl:
def test_irf_stderr(self):
irf_stderr = self.irf.stderr(orth=False)
for i in range(1, 1 + len(self.lut.irf_stderr)):
- assert_almost_equal(
- np.round(irf_stderr[i], 3), self.lut.irf_stderr[i - 1]
- )
+ assert_almost_equal(np.round(irf_stderr[i], 3), self.lut.irf_stderr[i - 1])
def test_cum_irf_stderr(self):
stderr = self.irf.cum_effect_stderr(orth=False)
for i in range(1, 1 + len(self.lut.cum_irf_stderr)):
- assert_almost_equal(
- np.round(stderr[i], 3), self.lut.cum_irf_stderr[i - 1]
- )
+ assert_almost_equal(np.round(stderr[i], 3), self.lut.cum_irf_stderr[i - 1])
def test_lr_effect_stderr(self):
stderr = self.irf.lr_effect_stderr(orth=False)
orth_stderr = self.irf.lr_effect_stderr(orth=True)
+ assert orth_stderr.shape == stderr.shape
assert_almost_equal(np.round(stderr, 3), self.lut.lr_stderr)
@@ -679,11 +671,14 @@ def test_var_trend():
model = VAR(data)
results = model.fit(4) # , trend = 'c')
irf = results.irf(10)
+ assert irf.irfs.shape == (11, 3, 3)
data_nc = data - data.mean(0)
model_nc = VAR(data_nc)
- results_nc = model_nc.fit(4, trend="n")
+ # Fit once with a trend
+ model_nc.fit(4, trend="n")
with pytest.raises(ValueError):
+ # Attempt to change the trend
model.fit(4, trend="t")
@@ -746,15 +741,13 @@ class TestVARExtras:
y_sim_init_2 = res0.simulate_var(seed=987128, initial_values=data[-1])
assert_allclose(y_sim_init[:k_ar], data[-k_ar:])
assert_allclose(y_sim_init_2[0], data[-1])
- assert_allclose(y_sim_init_2[k_ar-1], data[-1])
+ assert_allclose(y_sim_init_2[k_ar - 1], data[-1])
y_sim_init_3 = resl1.simulate_var(seed=987128, initial_values=data[-1])
assert_allclose(y_sim_init_3[0], data[-1])
n_sim = 900
- ysimz = res0.simulate_var(
- steps=n_sim, offset=np.zeros((n_sim, 3)), seed=987128
- )
+ ysimz = res0.simulate_var(steps=n_sim, offset=np.zeros((n_sim, 3)), seed=987128)
zero3 = np.zeros(3)
assert_allclose(ysimz.mean(0), zero3, atol=0.4)
# initialization does not use long run intercept, see #4542
@@ -767,7 +760,8 @@ class TestVARExtras:
assert_equal(res0.k_exog, 1)
assert_equal(res0.k_ar, 2)
- irf = res0.irf()
+ # Smoke test
+ res0.irf()
@pytest.mark.matplotlib
def test_process_plotting(self, close_figures):
@@ -843,9 +837,7 @@ class TestVARExtras:
# TODO: intercept differs by 4e-3, others are < 1e-12
assert_allclose(res_lin_trend.params, res_lin_trend1.params, rtol=5e-3)
assert_allclose(res_lin_trend.params, res_lin_trend2.params, rtol=5e-3)
- assert_allclose(
- res_lin_trend1.params, res_lin_trend2.params, rtol=1e-10
- )
+ assert_allclose(res_lin_trend1.params, res_lin_trend2.params, rtol=1e-10)
y1 = res_lin_trend.simulate_var(seed=987128)
y2 = res_lin_trend1.simulate_var(seed=987128)
@@ -857,18 +849,12 @@ class TestVARExtras:
h = 10
fc1 = res_lin_trend.forecast(res_lin_trend.endog[-2:], h)
exf = np.arange(len(data), len(data) + h)
- fc2 = res_lin_trend1.forecast(
- res_lin_trend1.endog[-2:], h, exog_future=exf
- )
+ fc2 = res_lin_trend1.forecast(res_lin_trend1.endog[-2:], h, exog_future=exf)
with pytest.raises(ValueError, match="exog_future only has"):
wrong_exf = np.arange(len(data), len(data) + h // 2)
- res_lin_trend1.forecast(
- res_lin_trend1.endog[-2:], h, exog_future=wrong_exf
- )
+ res_lin_trend1.forecast(res_lin_trend1.endog[-2:], h, exog_future=wrong_exf)
exf2 = exf[:, None] ** [0, 1]
- fc3 = res_lin_trend2.forecast(
- res_lin_trend2.endog[-2:], h, exog_future=exf2
- )
+ fc3 = res_lin_trend2.forecast(res_lin_trend2.endog[-2:], h, exog_future=exf2)
assert_allclose(fc2, fc1, rtol=1e-12, atol=1e-12)
assert_allclose(fc3, fc1, rtol=1e-12, atol=1e-12)
assert_allclose(fc3, fc2, rtol=1e-12, atol=1e-12)
@@ -900,8 +886,8 @@ class TestVARExtras:
sim2_init = res0.simulate_var(
seed=987128, steps=10, initial_values=init, nsimulations=2
)
- assert_allclose(sim2_init[0,:k_ar], init)
- assert_allclose(sim2_init[1,:k_ar], init)
+ assert_allclose(sim2_init[0, :k_ar], init)
+ assert_allclose(sim2_init[1, :k_ar], init)
def test_var_cov_params_pandas(bivariate_var_data):
@@ -919,9 +905,7 @@ def test_var_cov_params_pandas(bivariate_var_data):
def test_summaries_exog(reset_randomstate):
y = np.random.standard_normal((500, 6))
df = pd.DataFrame(y)
- cols = [f"endog_{i}" for i in range(2)] + [
- f"exog_{i}" for i in range(4)
- ]
+ cols = [f"endog_{i}" for i in range(2)] + [f"exog_{i}" for i in range(4)]
df.columns = cols
df.index = pd.date_range("1-1-1950", periods=500, freq="MS")
endog = df.iloc[:, :2]
@@ -978,9 +962,7 @@ def test_correct_nobs():
# make a VAR model
model = VAR(endog=data, exog=data_exog)
results = model.fit(maxlags=1)
- irf = results.irf_resim(
- orth=False, repl=100, steps=10, seed=1, burn=100, cum=False
- )
+ irf = results.irf_resim(orth=False, repl=100, steps=10, seed=1, burn=100, cum=False)
assert irf.shape == (100, 11, 3, 3)
@@ -991,7 +973,26 @@ def test_irf_err_bands():
model = VAR(data)
results = model.fit(maxlags=2)
irf = results.irf()
- bands_sz1 = irf.err_band_sz1()
- bands_sz2 = irf.err_band_sz2()
- bands_sz3 = irf.err_band_sz3()
- bands_mc = irf.errband_mc()
+ # Smoke tests only
+ irf.err_band_sz1()
+ irf.err_band_sz2()
+ irf.err_band_sz3()
+ irf.errband_mc()
+
+
+def test_0_lag(reset_randomstate):
+ # GH 9412
+ y = np.random.rand(300, 2)
+ results = VAR(y).fit(maxlags=1, ic="aic", trend="c")
+ assert results.params.shape == (1, 2)
+ fcasts = results.forecast(y, steps=5)
+ assert_allclose(fcasts, np.ones((5, 1)) * results.params)
+
+
+def test_forecast_wrong_shape_params(reset_randomstate):
+ # GH 9412
+ y = np.random.rand(300, 2)
+ mod = VAR(y)
+ results = mod.fit(maxlags=1, ic="aic", trend="c")
+ with pytest.raises(ValueError):
+ forecast(y, results.params, results.params, steps=5)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
patsy==1.0.1
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
-e git+https://github.com/statsmodels/statsmodels.git@0838f111df35d73fdd921ed3007099b56928ce4f#egg=statsmodels
tomli==2.2.1
tzdata==2025.2
| name: statsmodels
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- patsy==1.0.1
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- statsmodels==0.15.0.dev516+g0838f111d
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/statsmodels
| [
"statsmodels/tsa/vector_ar/tests/test_var.py::test_0_lag",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_forecast_wrong_shape_params"
] | [] | [
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_fevd_repr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_fevd_summary",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_fevd_cov",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_irf_coefs",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_constructor",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_names",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_get_eq_index",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_repr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_params",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_cov_params",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_cov_ybar",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_tstat",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_pvalues",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_summary",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_detsig",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_aic",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_bic",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_hqic",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_fpe",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_lagorder_select",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_nobs",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_stderr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_loglike",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_ma_rep",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_causality",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_causality_no_lags",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_select_order",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_is_stable",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_acf",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_acf_2_lags",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_acorr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_forecast",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_forecast_interval",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_reorder",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResults::test_pickle",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_lutkepohl_parse",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResultsLutkepohl::test_approx_mse",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResultsLutkepohl::test_irf_stderr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResultsLutkepohl::test_cum_irf_stderr",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARResultsLutkepohl::test_lr_effect_stderr",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_get_trendorder",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_var_constant",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_var_trend",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_irf_trend",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARExtras::test_process",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARExtras::test_forecast_cov",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARExtras::test_exog",
"statsmodels/tsa/vector_ar/tests/test_var.py::TestVARExtras::test_multiple_simulations",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_var_cov_params_pandas",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_summaries_exog",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_whiteness_nlag",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_var_maxlag",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_from_formula",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_correct_nobs",
"statsmodels/tsa/vector_ar/tests/test_var.py::test_irf_err_bands"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,087 | 3,594 | [
"statsmodels/tsa/vector_ar/var_model.py"
] |
|
koaning__scikit-lego-708 | 3d51e7b3e85ba755b6c251ec02297756fd5e8242 | 2024-10-29 09:28:28 | 706971f112ee5cc8fad2513efeb3a22c9a7bf707 | diff --git a/sklego/linear_model.py b/sklego/linear_model.py
index 7c05262..ebe9fc4 100644
--- a/sklego/linear_model.py
+++ b/sklego/linear_model.py
@@ -141,7 +141,17 @@ class LowessRegression(BaseEstimator, RegressorMixin):
X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
check_is_fitted(self, ["X_", "y_"])
- results = np.stack([np.average(self.y_, weights=self._calc_wts(x_i=x_i)) for x_i in X])
+ try:
+ results = np.stack([np.average(self.y_, weights=self._calc_wts(x_i=x_i)) for x_i in X])
+ except ZeroDivisionError:
+ msg = (
+ "Weights, resulting from `np.exp(-(distances**2) / self.sigma)`, are all zero. "
+ "Try to increase the value of `sigma` or to normalize the input data.\n\n"
+ "`distances` refer to the distance between each sample `x_i` with all the"
+ "training samples."
+ )
+ raise ValueError(msg)
+
return results
| [BUG] LowessRegressor: ZeroDivisionError: Weights sum to zero, can't be normalized
Using LowessRegressor, I received a "ZeroDivisionError: Weights sum to zero, can't be normalized" error. A little debugging revealed that the problem is in _calc_wts:
```
def _calc_wts(self, x_i):
"""Calculate the weights for a single point `x_i` using the training data `self.X_` and the parameters
`self.sigma` and `self.span`. The weights are calculated as `np.exp(-(distances**2) / self.sigma)`,
where distances are the distances between `x_i` and all the training samples.
If `self.span` is not None, then the weights are multiplied by
`(distances <= np.quantile(distances, q=self.span))`.
"""
distances = np.linalg.norm(self.X_ - x_i, axis=1)
weights = np.exp(-(distances**2) / self.sigma)
if self.span:
weights = weights * (distances <= np.quantile(distances, q=self.span))
return weights
```
If sigma is not sized properly, the scaled distances will be large and cause np.exp() to return zeros for the weights, resulting in an exception being raised when the weights are applied.
This seems like the wrong behavior. Perhaps the weights should be clipped to some minimum non-zero weight so that the predictor can still be used, perhaps with a warning message to explain the problem? | koaning/scikit-lego | diff --git a/tests/test_estimators/test_lowess.py b/tests/test_estimators/test_lowess.py
index a135a0a..a4d39f3 100644
--- a/tests/test_estimators/test_lowess.py
+++ b/tests/test_estimators/test_lowess.py
@@ -1,4 +1,7 @@
+import re
+
import numpy as np
+import pytest
from sklearn.utils.estimator_checks import parametrize_with_checks
from sklego.linear_model import LowessRegression
@@ -15,3 +18,18 @@ def test_obvious_usecase():
y = np.ones(x.shape)
y_pred = LowessRegression().fit(X, y).predict(X)
assert np.isclose(y, y_pred).all()
+
+
+def test_custom_error_for_zero_division():
+ x = np.arange(0, 100)
+ X = x.reshape(-1, 1)
+ y = np.ones(x.shape)
+ estimator = LowessRegression(sigma=1e-10).fit(X, y)
+
+ with pytest.raises(
+ ValueError, match=re.escape("Weights, resulting from `np.exp(-(distances**2) / self.sigma)`, are all zero.")
+ ):
+ # Adding an offset, otherwise X to predict would be the same as X in fit method,
+ # leading to weight of 1 for the corresponding value.
+ X_pred = X[:10] + 0.5
+ estimator.predict(X_pred)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-cov",
"pytest-mock"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | babel==2.17.0
backrefs==5.8
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
clarabel==0.10.0
click==8.1.8
cmake==4.0.0
colorama==0.4.6
coverage==7.8.0
cvxpy==1.6.4
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
formulaic==1.1.1
ghp-import==2.1.0
griffe==1.7.1
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
interface-meta==1.3.0
Jinja2==3.1.6
joblib==1.4.2
llvmlite==0.43.0
Markdown==3.7
MarkupSafe==3.0.2
mergedeep==1.3.4
mkdocs==1.6.1
mkdocs-autorefs==1.4.1
mkdocs-get-deps==0.2.0
mkdocs-material==9.6.10
mkdocs-material-extensions==1.3.1
mkdocstrings==0.29.1
mkdocstrings-python==1.16.8
narwhals==1.32.0
nodeenv==1.9.1
numba==0.60.0
numpy==1.26.4
osqp==1.0.1
packaging==24.2
paginate==0.5.7
pandas==2.2.3
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
pyarrow==19.0.1
Pygments==2.19.1
pymdown-extensions==10.14.3
pynndescent==0.5.13
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
pyyaml_env_tag==0.1
requests==2.32.3
ruff==0.11.2
scikit-learn==1.6.1
-e git+https://github.com/koaning/scikit-lego.git@3d51e7b3e85ba755b6c251ec02297756fd5e8242#egg=scikit_lego
scipy==1.13.1
scs==3.2.7.post2
six==1.17.0
threadpoolctl==3.6.0
tomli==2.2.1
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
umap-learn==0.5.7
urllib3==2.3.0
virtualenv==20.29.3
watchdog==6.0.0
wrapt==1.17.2
zipp==3.21.0
| name: scikit-lego
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- babel==2.17.0
- backrefs==5.8
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- clarabel==0.10.0
- click==8.1.8
- cmake==4.0.0
- colorama==0.4.6
- coverage==7.8.0
- cvxpy==1.6.4
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- formulaic==1.1.1
- ghp-import==2.1.0
- griffe==1.7.1
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- interface-meta==1.3.0
- jinja2==3.1.6
- joblib==1.4.2
- llvmlite==0.43.0
- markdown==3.7
- markupsafe==3.0.2
- mergedeep==1.3.4
- mkdocs==1.6.1
- mkdocs-autorefs==1.4.1
- mkdocs-get-deps==0.2.0
- mkdocs-material==9.6.10
- mkdocs-material-extensions==1.3.1
- mkdocstrings==0.29.1
- mkdocstrings-python==1.16.8
- narwhals==1.32.0
- nodeenv==1.9.1
- numba==0.60.0
- numpy==1.26.4
- osqp==1.0.1
- packaging==24.2
- paginate==0.5.7
- pandas==2.2.3
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- pyarrow==19.0.1
- pygments==2.19.1
- pymdown-extensions==10.14.3
- pynndescent==0.5.13
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- pyyaml-env-tag==0.1
- requests==2.32.3
- ruff==0.11.2
- scikit-learn==1.6.1
- scikit-lego==0.9.2
- scipy==1.13.1
- scs==3.2.7.post2
- six==1.17.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- umap-learn==0.5.7
- urllib3==2.3.0
- virtualenv==20.29.3
- watchdog==6.0.0
- wrapt==1.17.2
- zipp==3.21.0
prefix: /opt/conda/envs/scikit-lego
| [
"tests/test_estimators/test_lowess.py::test_custom_error_for_zero_division"
] | [
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_n_features_in_after_fitting]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_mixin_order]"
] | [
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_cloneable0]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_cloneable1]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_tags_renamed]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_valid_tag_types]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_repr]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_no_attributes_set_in_init]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit_score_takes_y]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_overwrite_params]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_dont_overwrite_parameters]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_fit_returns_self]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_readonly_memmap_input]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_unfitted]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_do_not_raise_errors_in_init_or_set_params]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_positive_only_tag_during_fit]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_dtypes]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_complex_data]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_dtype_object]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_empty_data_messages]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_pipeline_consistency]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_nan_inf]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_sparse_tag]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_sparse_array]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimator_sparse_matrix]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_pickle]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_estimators_pickle(readonly_memmap=True)]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_f_contiguous_array_estimator]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_parameters_default_constructible]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_methods_sample_order_invariance]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_methods_subset_invariance]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit2d_1sample]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit2d_1feature]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_get_params_invariance]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_set_params]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_dict_unchanged]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit_idempotent]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit_check_is_fitted]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_n_features_in]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit1d]",
"tests/test_estimators/test_lowess.py::test_sklearn_compatible_estimator[LowessRegression()-check_fit2d_predict1d]",
"tests/test_estimators/test_lowess.py::test_obvious_usecase"
] | [] | MIT License | 20,094 | 290 | [
"sklego/linear_model.py"
] |
|
IAMconsortium__nomenclature-420 | cf930aa4fc3782f53bfa7a1c20d3f045d810cd56 | 2024-10-29 09:43:30 | cf930aa4fc3782f53bfa7a1c20d3f045d810cd56 | danielhuppmann: Sweet! | diff --git a/nomenclature/code.py b/nomenclature/code.py
index 47762ce..f983edc 100644
--- a/nomenclature/code.py
+++ b/nomenclature/code.py
@@ -125,12 +125,26 @@ class Code(BaseModel):
def _replace_or_recurse(_attr, _value):
# if the attribute is a string and contains "{tag}" replace
if isinstance(_value, str) and "{" + tag + "}" in _value:
- # if the the target has the corresponding attribute replace
+ # if the target has the attribute, replace the tag with the value
if _attr in target.flattened_dict:
return _value.replace("{" + tag + "}", getattr(target, _attr))
# otherwise return the name
else:
return _value.replace("{" + tag + "}", getattr(target, "name"))
+ # if the attribute is an integer and "tier"
+ elif _attr == "tier" and isinstance(_value, int):
+ # if tier in tag is str formatted as "^1"/"^2"
+ if (tag_tier := getattr(target, _attr, None)) in {"^1", "^2"}:
+ return _value + int(tag_tier[-1])
+ # if tag doesn't have tier attribute
+ elif not tag_tier:
+ return _value
+ # else misformatted tier in tag
+ else:
+ raise ValueError(
+ f"Invalid 'tier' attribute in '{tag}' tag '{target.name}': {tag_tier}\n"
+ "Allowed values are '^1' or '^2'."
+ )
# if the attribute is a list, iterate over the items and replace tags
elif isinstance(_value, list):
return [_replace_or_recurse(_attr, _v) for _v in _value]
@@ -164,6 +178,7 @@ class Code(BaseModel):
class VariableCode(Code):
unit: Union[str, List[str]] = Field(...)
+ tier: int | str | None = None
weight: str | None = None
region_aggregation: List[Dict[str, Dict]] | None = Field(
default=None, alias="region-aggregation"
| Make "tier" a known attribute as integer
We often use `tier` as a variable-attribute to indicate importance of a variable - tier 1 is mandatory to be reported in a scenario-comparison project, tier 2 and 3 are optional.
This could be added as a standard attribute (as integer) to the VariableCode, but with a tag-list twist (a very special case of #402).
- If the Variable has a tier and the tag-list doesn't: easy
- If the Variable has a tier and the tag-list has a tier-attribute of the type "+1" - add the value to the tier.
- If the Variable doesn't have a tier: ignore
[I suggest the format "+1" to clearly indicate that this is added. If the tier in a tag-list is an integer, an error should be raised.]
This would help in cases like [here](https://github.com/IAMconsortium/common-definitions/blob/main/definitions/variable/energy/final-energy.yaml#L26) where `Final Energy|Non-Energy Use|Chemicals|Liquids` should have tier 2 but `Final Energy|Non-Energy Use|Chemicals|High Value Chemicals|Liquids|Biomass` should probably be tier 4. The of these variables are made of the same tag-lists (each combining different priorities), so having this flexibility to indicate priority within a tag list would be very helpful...
| IAMconsortium/nomenclature | diff --git a/tests/data/codelist/tier_attribute/invalid/tag_fuel.yaml b/tests/data/codelist/tier_attribute/invalid/tag_fuel.yaml
new file mode 100644
index 0000000..5c50579
--- /dev/null
+++ b/tests/data/codelist/tier_attribute/invalid/tag_fuel.yaml
@@ -0,0 +1,4 @@
+- Fuel:
+ - Coal:
+ description: coal
+ tier: +1
diff --git a/tests/data/codelist/tier_attribute/invalid/variables.yaml b/tests/data/codelist/tier_attribute/invalid/variables.yaml
new file mode 100644
index 0000000..a1bed6d
--- /dev/null
+++ b/tests/data/codelist/tier_attribute/invalid/variables.yaml
@@ -0,0 +1,8 @@
+- Primary Energy:
+ description: Total primary energy consumption
+ unit: EJ/yr
+- Primary Energy|{Fuel}:
+ description: Primary energy consumption of {Fuel}
+ unit: EJ/yr
+ tier: 1
+
diff --git a/tests/data/codelist/tier_attribute/valid/tag_fuel.yaml b/tests/data/codelist/tier_attribute/valid/tag_fuel.yaml
new file mode 100644
index 0000000..d62e410
--- /dev/null
+++ b/tests/data/codelist/tier_attribute/valid/tag_fuel.yaml
@@ -0,0 +1,5 @@
+- Fuel:
+ - Coal:
+ description: coal
+ - Coal|Lignite:
+ tier: ^1
diff --git a/tests/data/codelist/tier_attribute/valid/tag_sector.yaml b/tests/data/codelist/tier_attribute/valid/tag_sector.yaml
new file mode 100644
index 0000000..984d493
--- /dev/null
+++ b/tests/data/codelist/tier_attribute/valid/tag_sector.yaml
@@ -0,0 +1,6 @@
+- Sector:
+ - Industry:
+ description: industrial sector excluding non-energy use (e.g.feedstocks)
+ - Industry|Chemicals:
+ description: chemicals industry
+ tier: ^1
diff --git a/tests/data/codelist/tier_attribute/valid/variables.yaml b/tests/data/codelist/tier_attribute/valid/variables.yaml
new file mode 100644
index 0000000..6132cb1
--- /dev/null
+++ b/tests/data/codelist/tier_attribute/valid/variables.yaml
@@ -0,0 +1,16 @@
+- Primary Energy:
+ description: Total primary energy consumption
+ unit: EJ/yr
+- Primary Energy|{Fuel}:
+ description: Primary energy consumption of {Fuel}
+ unit: EJ/yr
+ # this variable does not have a tier to test that no tier is added at initialization
+
+- Final Energy|{Fuel}|{Sector}:
+ description: Final energy consumption of {Fuel} in {Sector}
+ unit: EJ/yr
+ tier: 1
+- Primary Energy|{Fuel} [Share]:
+ description: Share of {Fuel} in the total primary energy mix
+ unit: '%'
+ tier: 2
diff --git a/tests/test_codelist.py b/tests/test_codelist.py
index a5c3642..85b0121 100644
--- a/tests/test_codelist.py
+++ b/tests/test_codelist.py
@@ -147,6 +147,38 @@ def test_tags_in_list_attributes():
assert getattr(code[code_name], attr_name) == value
+def test_tier_attribute_in_tags():
+ """Check for tier attribute functionality ('tier' in tags upgrade CodeList's):
+ 1) 'tier' is not added when not present in Code or tag;
+ 2) 'tier' is/are upgraded when present in Code and matching tag(s)"""
+ code_list = VariableCodeList.from_directory(
+ "variable", MODULE_TEST_DATA_DIR / "tier_attribute" / "valid"
+ )
+ # check tier attribute is upgraded correctly
+ assert code_list["Final Energy|Coal|Industry"].tier == 1
+ assert code_list["Final Energy|Coal|Lignite|Industry"].tier == 2
+ assert code_list["Final Energy|Coal|Industry|Chemicals"].tier == 2
+ assert code_list["Primary Energy|Coal [Share]"].tier == 2
+ assert code_list["Primary Energy|Coal|Lignite [Share]"].tier == 3
+
+ # check multiple tier attributes upgrade cumulatively
+ assert code_list["Final Energy|Coal|Lignite|Industry|Chemicals"].tier == 3
+
+ # check codes without tier attributes don't change
+ assert not code_list["Primary Energy"].tier
+
+
+def test_misformatted_tier_fails():
+ """Check misformatted 'tier' attributes raise errors"""
+
+ match = "Invalid 'tier' attribute in 'Fuel' tag 'Coal': 1\n"
+ "Allowed values are '^1' or '^2'."
+ with pytest.raises(ValueError, match=match):
+ VariableCodeList.from_directory(
+ "variable", MODULE_TEST_DATA_DIR / "tier_attribute" / "invalid"
+ )
+
+
def test_region_codelist():
"""Check replacing top-level hierarchy of yaml file as attribute for regions"""
code = RegionCodeList.from_directory(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.19 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alembic==1.15.2
annotated-types==0.7.0
anyio==4.9.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
et_xmlfile==2.0.0
exceptiongroup==1.2.2
execnet==2.1.1
fastapi==0.115.12
flexcache==0.3
flexparser==0.4
fonttools==4.56.0
gitdb==4.0.12
GitPython==3.1.44
greenlet==3.1.1
h11==0.14.0
h2==4.2.0
hpack==4.1.0
httpcore==1.0.7
httpx==0.28.1
hyperframe==6.1.0
iam-units==2023.9.12
idna==3.10
iniconfig==2.1.0
ixmp4==0.9.8
kiwisolver==1.4.8
Mako==1.3.9
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
mdurl==0.1.2
mypy==1.15.0
mypy-extensions==1.0.0
-e git+https://github.com/IAMconsortium/nomenclature.git@cf930aa4fc3782f53bfa7a1c20d3f045d810cd56#egg=nomenclature_iamc
numpy==1.26.4
openpyxl==3.1.5
packaging==24.2
pandas==2.2.3
pandera==0.23.1
pillow==11.1.0
Pint==0.24.4
platformdirs==4.3.7
pluggy==1.5.0
psycopg==3.2.6
psycopg-binary==3.2.6
pyam-iamc==2.3.0
pycountry==23.12.11
pydantic==2.11.1
pydantic-settings==2.8.1
pydantic_core==2.33.0
Pygments==2.19.1
PyJWT==2.10.1
pyparsing==3.2.3
pysquirrel==1.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
rich==14.0.0
scipy==1.15.2
seaborn==0.13.2
shellingham==1.5.4
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
SQLAlchemy==2.0.40
SQLAlchemy-Utils==0.41.2
starlette==0.46.1
toml==0.10.2
tomli==2.2.1
typeguard==4.4.2
typer==0.15.2
typing-inspect==0.9.0
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
wquantiles==0.6
XlsxWriter==3.2.2
| name: nomenclature
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alembic==1.15.2
- annotated-types==0.7.0
- anyio==4.9.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- et-xmlfile==2.0.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- fastapi==0.115.12
- flexcache==0.3
- flexparser==0.4
- fonttools==4.56.0
- gitdb==4.0.12
- gitpython==3.1.44
- greenlet==3.1.1
- h11==0.14.0
- h2==4.2.0
- hpack==4.1.0
- httpcore==1.0.7
- httpx==0.28.1
- hyperframe==6.1.0
- iam-units==2023.9.12
- idna==3.10
- iniconfig==2.1.0
- ixmp4==0.9.8
- kiwisolver==1.4.8
- mako==1.3.9
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.10.1
- mdurl==0.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- nomenclature-iamc==0.19.2.dev2+cf930aa
- numpy==1.26.4
- openpyxl==3.1.5
- packaging==24.2
- pandas==2.2.3
- pandera==0.23.1
- pillow==11.1.0
- pint==0.24.4
- platformdirs==4.3.7
- pluggy==1.5.0
- psycopg==3.2.6
- psycopg-binary==3.2.6
- pyam-iamc==2.3.0
- pycountry==23.12.11
- pydantic==2.11.1
- pydantic-core==2.33.0
- pydantic-settings==2.8.1
- pygments==2.19.1
- pyjwt==2.10.1
- pyparsing==3.2.3
- pysquirrel==1.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- rich==14.0.0
- scipy==1.15.2
- seaborn==0.13.2
- shellingham==1.5.4
- six==1.17.0
- smmap==5.0.2
- sniffio==1.3.1
- sqlalchemy==2.0.40
- sqlalchemy-utils==0.41.2
- starlette==0.46.1
- toml==0.10.2
- tomli==2.2.1
- typeguard==4.4.2
- typer==0.15.2
- typing-extensions==4.13.0
- typing-inspect==0.9.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- wquantiles==0.6
- xlsxwriter==3.2.2
prefix: /opt/conda/envs/nomenclature
| [
"tests/test_codelist.py::test_tier_attribute_in_tags",
"tests/test_codelist.py::test_misformatted_tier_fails"
] | [] | [
"tests/test_codelist.py::test_simple_codelist",
"tests/test_codelist.py::test_codelist_adding_duplicate_raises",
"tests/test_codelist.py::test_codelist_adding_non_code_raises",
"tests/test_codelist.py::test_codelist_name_key_mismatch",
"tests/test_codelist.py::test_codelist_to_yaml",
"tests/test_codelist.py::test_duplicate_code_raises",
"tests/test_codelist.py::test_duplicate_tag_raises",
"tests/test_codelist.py::test_tagged_codelist",
"tests/test_codelist.py::test_tags_in_list_attributes",
"tests/test_codelist.py::test_region_codelist",
"tests/test_codelist.py::test_region_codelist_nonexisting_country_name",
"tests/test_codelist.py::test_region_codelist_str_country_name",
"tests/test_codelist.py::test_norway_as_str",
"tests/test_codelist.py::test_to_excel",
"tests/test_codelist.py::test_to_csv",
"tests/test_codelist.py::test_stray_tag_fails[char_in_str-Unexpected",
"tests/test_codelist.py::test_stray_tag_fails[char_in_list-Unexpected",
"tests/test_codelist.py::test_stray_tag_fails[char_in_dict-Unexpected",
"tests/test_codelist.py::test_end_whitespace_fails",
"tests/test_codelist.py::test_variable_codelist_units",
"tests/test_codelist.py::test_variable_codelist_multiple_units",
"tests/test_codelist.py::test_to_excel_read_excel_roundtrip",
"tests/test_codelist.py::test_to_yaml_from_directory",
"tests/test_codelist.py::test_RegionCodeList_filter",
"tests/test_codelist.py::test_RegionCodeList_hierarchy",
"tests/test_codelist.py::test_codelist_general_filter",
"tests/test_codelist.py::test_codelist_general_filter_multiple_attributes",
"tests/test_codelist.py::test_codelist_general_filter_No_Elements",
"tests/test_codelist.py::test_MetaCodeList_from_directory",
"tests/test_codelist.py::test_multiple_external_repos",
"tests/test_codelist.py::test_variable_codelist_with_duplicates_raises[VariableCodeList]",
"tests/test_codelist.py::test_variable_codelist_with_duplicates_raises[CodeList]",
"tests/test_codelist.py::test_variablecodelist_list_missing_variables_to_new_file"
] | [] | Apache License 2.0 | 20,095 | 513 | [
"nomenclature/code.py"
] |
dask__fastparquet-940 | 0f7a98eac60771685f853f8a955787b7c82a5d18 | 2024-10-29 13:27:19 | 0f7a98eac60771685f853f8a955787b7c82a5d18 | yohplala: Hi @martindurant
I checked all failing environments, it is systematically related to a trouble with the setting of the conda env. and tests are actually not run.
```
C:\Users\runneradmin\micromamba-bin\micromamba.exe shell deinit -s bash -p C:\Users\runneradmin\micromamba-root -y --log-level warning
The following argument was not expected: -p
Run with --help for more information.
D:\a\_actions\mamba-org\provision-with-micromamba\main\dist\post\index.js:62[5](https://github.com/dask/fastparquet/actions/runs/11574863592/job/32220088198?pr=940#step:9:5)85
throw Error(`Failed to execute ${JSON.stringify(args)}: ${error}`)
^
Error: Failed to execute ["C:\\Users\\runneradmin\\micromamba-bin\\micromamba",["shell","deinit","-s","bash","-p","C:\\Users\\runneradmin\\micromamba-root","-y","--log-level","warning"]]: Error: The process 'C:\Users\runneradmin\micromamba-bin\micromamba.exe' failed with exit code 10[9](https://github.com/dask/fastparquet/actions/runs/11574863592/job/32220088198?pr=940#step:9:9)
```
yohplala: @martindurant , I am not super expert with the definition of cached properties. Don't hesitate to ask for an alternative fix if this one is not appropriate (I would do with some directions) | diff --git a/fastparquet/api.py b/fastparquet/api.py
index c54e5eb..3f6f781 100644
--- a/fastparquet/api.py
+++ b/fastparquet/api.py
@@ -260,7 +260,7 @@ class ParquetFile(object):
@property
def statistics(self):
- if self._statistics is None:
+ if not hasattr(self, '_statistics') or self._statistics is None:
self._statistics = statistics(self)
return self._statistics
| ``statistics`` does not work on a ParquetFile subset?
**Describe the issue**:
**Minimal Complete Verifiable Example**:
```python
import pandas as pd
import fastparquet as fp
fp.write(filename="small_df", data=pd.DataFrame({'a':[1,2,3]}), row_group_offsets=[0,2], file_scheme="hive")
pf = fp.ParquetFile("small_df")
# Works
pf.statistics
{'min': {'a': [1, 3]},
'max': {'a': [2, 3]},
'null_count': {'a': [0, 0]},
'distinct_count': {'a': [None, None]}}
# Does not work
pf[-1].statistics
AttributeError: 'ParquetFile' object has no attribute '_statistics'
```
Selecting row groups should still result in a ``ParquetFile`` and ``statistics`` should work?
**Environment**:
- Python version: 3.10
- Operating System: Ubutnu
- Install method (conda, pip, source): conda
| dask/fastparquet | diff --git a/fastparquet/test/test_api.py b/fastparquet/test/test_api.py
index a7f824e..6ffdce5 100644
--- a/fastparquet/test/test_api.py
+++ b/fastparquet/test/test_api.py
@@ -44,14 +44,23 @@ def test_statistics(tempdir):
p = ParquetFile(fn)
s = statistics(p)
- expected = {'distinct_count': {'x': [None, None],
+ expected1 = {'distinct_count': {'x': [None, None],
'y': [None, None],
'z': [None, None]},
'max': {'x': [2, 3], 'y': [2.0, 1.0], 'z': ['b', 'c']},
'min': {'x': [1, 3], 'y': [1.0, 1.0], 'z': ['a', 'c']},
'null_count': {'x': [0, 0], 'y': [0, 0], 'z': [0, 0]}}
- assert s == expected
+ assert s == expected1
+
+ expected2 = {'distinct_count': {'x': [None],
+ 'y': [None],
+ 'z': [None]},
+ 'max': {'x': [3], 'y': [1.0], 'z': ['c']},
+ 'min': {'x': [3], 'y': [1.0], 'z': ['c']},
+ 'null_count': {'x': [0], 'y': [0], 'z': [0]}}
+
+ assert p[-1].statistics == expected2
def test_logical_types(tempdir):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2024.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pyspark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc liblzo2-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
cramjam==2.9.1
exceptiongroup==1.2.2
-e git+https://github.com/dask/fastparquet.git@0f7a98eac60771685f853f8a955787b7c82a5d18#egg=fastparquet
fsspec==2025.3.1
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
py4j==0.10.9.7
pyspark==3.5.5
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==2.2.1
tzdata==2025.2
| name: fastparquet
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- cramjam==2.9.1
- exceptiongroup==1.2.2
- fastparquet==2024.10.1.dev1
- fsspec==2025.3.1
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- py4j==0.10.9.7
- pyspark==3.5.5
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/fastparquet
| [
"fastparquet/test/test_api.py::test_statistics"
] | [] | [
"fastparquet/test/test_api.py::test_logical_types",
"fastparquet/test/test_api.py::test_text_schema",
"fastparquet/test/test_api.py::test_empty_statistics",
"fastparquet/test/test_api.py::test_sorted_row_group_columns",
"fastparquet/test/test_api.py::test_iter",
"fastparquet/test/test_api.py::test_pickle",
"fastparquet/test/test_api.py::test_directory_local",
"fastparquet/test/test_api.py::test_directory_error",
"fastparquet/test/test_api.py::test_directory_mem",
"fastparquet/test/test_api.py::test_directory_mem_nest",
"fastparquet/test/test_api.py::test_pathlib_path",
"fastparquet/test/test_api.py::test_attributes",
"fastparquet/test/test_api.py::test_open_standard",
"fastparquet/test/test_api.py::test_filelike",
"fastparquet/test/test_api.py::test_zero_child_leaf",
"fastparquet/test/test_api.py::test_request_nonexistent_column",
"fastparquet/test/test_api.py::test_read_multiple_no_metadata",
"fastparquet/test/test_api.py::test_write_common_metadata",
"fastparquet/test/test_api.py::test_write_common_metadata_exception",
"fastparquet/test/test_api.py::test_single_upper_directory",
"fastparquet/test/test_api.py::test_string_partition_name",
"fastparquet/test/test_api.py::test_numerical_partition_name",
"fastparquet/test/test_api.py::test_floating_point_partition_name",
"fastparquet/test/test_api.py::test_datetime_partition_names",
"fastparquet/test/test_api.py::test_string_partition_names",
"fastparquet/test/test_api.py::test_mixed_partition_types[partitions0]",
"fastparquet/test/test_api.py::test_mixed_partition_types[partitions1]",
"fastparquet/test/test_api.py::test_filter_without_paths",
"fastparquet/test/test_api.py::test_filter_special",
"fastparquet/test/test_api.py::test_filter_dates",
"fastparquet/test/test_api.py::test_in_filter",
"fastparquet/test/test_api.py::test_partition_columns",
"fastparquet/test/test_api.py::test_in_filter_numbers",
"fastparquet/test/test_api.py::test_filter_stats",
"fastparquet/test/test_api.py::test_in_filters[vals0-None-None-False-False]",
"fastparquet/test/test_api.py::test_in_filters[vals1-3-3-False-True]",
"fastparquet/test/test_api.py::test_in_filters[vals2-2-2-True-False]",
"fastparquet/test/test_api.py::test_in_filters[vals3-None-7-False-False]",
"fastparquet/test/test_api.py::test_in_filters[vals4-None-2-True-False]",
"fastparquet/test/test_api.py::test_in_filters[vals5-2-None-False-False]",
"fastparquet/test/test_api.py::test_in_filters[vals6-7-None-True-False]",
"fastparquet/test/test_api.py::test_in_filters[vals7-2-4-False-False]",
"fastparquet/test/test_api.py::test_in_filters[vals8-5-6-False-True]",
"fastparquet/test/test_api.py::test_in_filters[vals9-2-3-False-True]",
"fastparquet/test/test_api.py::test_in_filters[vals10-6-7-False-True]",
"fastparquet/test/test_api.py::test_in_filters[vals11-1-2-True-False]",
"fastparquet/test/test_api.py::test_in_filters[vals12-7-8-True-False]",
"fastparquet/test/test_api.py::test_in_filters[vals13-1-8-False-False]",
"fastparquet/test/test_api.py::test_in_filters[vals14-1-8-True-False]",
"fastparquet/test/test_api.py::test_in_filter_rowgroups",
"fastparquet/test/test_api.py::test_unexisting_filter_cols",
"fastparquet/test/test_api.py::test_index_not_in_columns",
"fastparquet/test/test_api.py::test_no_index_name",
"fastparquet/test/test_api.py::test_input_column_list_not_mutated",
"fastparquet/test/test_api.py::test_drill_list",
"fastparquet/test/test_api.py::test_multi_list",
"fastparquet/test/test_api.py::test_hive_and_drill_list",
"fastparquet/test/test_api.py::test_bad_file_paths",
"fastparquet/test/test_api.py::test_compression_zstd",
"fastparquet/test/test_api.py::test_compression_lz4",
"fastparquet/test/test_api.py::test_compression_snappy",
"fastparquet/test/test_api.py::test_int96_stats",
"fastparquet/test/test_api.py::test_only_partition_columns",
"fastparquet/test/test_api.py::test_path_containing_metadata_df",
"fastparquet/test/test_api.py::test_empty_df",
"fastparquet/test/test_api.py::test_unicode_cols",
"fastparquet/test/test_api.py::test_multi_cat",
"fastparquet/test/test_api.py::test_multi_cat_single",
"fastparquet/test/test_api.py::test_multi_cat_split",
"fastparquet/test/test_api.py::test_multi",
"fastparquet/test/test_api.py::test_multi_dtype",
"fastparquet/test/test_api.py::test_simple_nested",
"fastparquet/test/test_api.py::test_pandas_metadata_inference",
"fastparquet/test/test_api.py::test_write_index_false",
"fastparquet/test/test_api.py::test_timestamp_filer",
"fastparquet/test/test_api.py::test_row_filter",
"fastparquet/test/test_api.py::test_custom_row_filter",
"fastparquet/test/test_api.py::test_select",
"fastparquet/test/test_api.py::test_head",
"fastparquet/test/test_api.py::test_spark_date_empty_rg",
"fastparquet/test/test_api.py::test_remove_rgs_no_partition",
"fastparquet/test/test_api.py::test_remove_rgs_with_partitions",
"fastparquet/test/test_api.py::test_remove_rgs_partitions_and_fsspec",
"fastparquet/test/test_api.py::test_remove_rgs_not_hive",
"fastparquet/test/test_api.py::test_remove_rgs_partitioned_pyarrow_multi",
"fastparquet/test/test_api.py::test_remove_all_rgs",
"fastparquet/test/test_api.py::test_remove_rgs_simple_merge",
"fastparquet/test/test_api.py::test_write_rgs_simple",
"fastparquet/test/test_api.py::test_write_rgs_simple_no_index",
"fastparquet/test/test_api.py::test_write_rgs_hive",
"fastparquet/test/test_api.py::test_write_rgs_hive_partitions",
"fastparquet/test/test_api.py::test_write_rgs_simple_schema_exception",
"fastparquet/test/test_api.py::test_file_renaming_no_partition",
"fastparquet/test/test_api.py::test_file_renaming_with_partitions",
"fastparquet/test/test_api.py::test_slicing_makes_copy",
"fastparquet/test/test_api.py::test_fsspec_append",
"fastparquet/test/test_api.py::test_not_quite_fsspec",
"fastparquet/test/test_api.py::test_len_and_bool",
"fastparquet/test/test_api.py::test_var_dtypes",
"fastparquet/test/test_api.py::test_not_a_path",
"fastparquet/test/test_api.py::test_cat_not_cat",
"fastparquet/test/test_api.py::test_select_or_iter",
"fastparquet/test/test_api.py::test_gh929",
"fastparquet/test/test_api.py::test_writing_to_buffer_does_not_close"
] | [] | Apache License 2.0 | 20,100 | 125 | [
"fastparquet/api.py"
] |
tobymao__sqlglot-4311 | e92904e61ab3b14fe18d472df19311f9b014f6cc | 2024-10-29 20:22:36 | c1456d07097c42a2ba2078ad30a8afe4cc89597d | diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 6e189958..e9ebf8fd 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2314,8 +2314,10 @@ class Generator(metaclass=_Generator):
step_sql = self.sql(expression, "step")
step_sql = f" STEP {step_sql}" if step_sql else ""
interpolated_values = [
- f"{self.sql(named_expression, 'alias')} AS {self.sql(named_expression, 'this')}"
- for named_expression in expression.args.get("interpolate") or []
+ f"{self.sql(e, 'alias')} AS {self.sql(e, 'this')}"
+ if isinstance(e, exp.Alias)
+ else self.sql(e, "this")
+ for e in expression.args.get("interpolate") or []
]
interpolate = (
f" INTERPOLATE ({', '.join(interpolated_values)})" if interpolated_values else ""
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 100fd1d0..bb612912 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -4166,12 +4166,11 @@ class Parser(metaclass=_Parser):
return self.expression(exp.Connect, start=start, connect=connect, nocycle=nocycle)
- def _parse_name_as_expression(self) -> exp.Alias:
- return self.expression(
- exp.Alias,
- alias=self._parse_id_var(any_token=True),
- this=self._match(TokenType.ALIAS) and self._parse_assignment(),
- )
+ def _parse_name_as_expression(self) -> t.Optional[exp.Expression]:
+ this = self._parse_id_var(any_token=True)
+ if self._match(TokenType.ALIAS):
+ this = self.expression(exp.Alias, alias=this, this=self._parse_assignment())
+ return this
def _parse_interpolate(self) -> t.Optional[t.List[exp.Expression]]:
if self._match_text_seq("INTERPOLATE"):
| Error when using WITH FILL + INTERPOLATE in Clickhouse
Hi all,
I am writing a query that uses some WITH FILL + INTERPOLATE in Clickhouse and I am seeing the following error:
```python
ParseError: Required keyword: 'this' missing for <class 'sqlglot.expressions.Alias'>. Line 10, Col: 20.
HOUR
TO toDateTime64(1730185140, 3) - INTERVAL 7 HOUR
STEP toIntervalSecond(900)
INTERPOLATE (interp)
```
### To Reproduce
This is the offending Clickhouse query:
```sql
SELECT
1730098800::DATETIME64 DATETIME,
'test' interp
ORDER BY DATETIME
WITH FILL
FROM toDateTime64(1730098800, 3)
TO toDateTime64(1730185140, 3)
STEP toIntervalSecond(900)
INTERPOLATE (interp)
```
Steps to reproduce:
```python
import sqlglot
sql = '''
SELECT
1730098800::DATETIME64 DATETIME,
'test' interp
ORDER BY DATETIME
WITH FILL
FROM toDateTime64(1730098800, 3) - INTERVAL 7 HOUR
TO toDateTime64(1730185140, 3) - INTERVAL 7 HOUR
STEP toIntervalSecond(900)
INTERPOLATE (interp)
'''
sqlglot.parse_one(sql, read='clickhouse')
``` | tobymao/sqlglot | diff --git a/tests/dialects/test_clickhouse.py b/tests/dialects/test_clickhouse.py
index 1e84ade2..d99a4ef4 100644
--- a/tests/dialects/test_clickhouse.py
+++ b/tests/dialects/test_clickhouse.py
@@ -96,6 +96,9 @@ class TestClickhouse(Validator):
self.validate_identity("TRUNCATE TABLE t1 ON CLUSTER test_cluster")
self.validate_identity("TRUNCATE DATABASE db")
self.validate_identity("TRUNCATE DATABASE db ON CLUSTER test_cluster")
+ self.validate_identity(
+ "SELECT CAST(1730098800 AS DateTime64) AS DATETIME, 'test' AS interp ORDER BY DATETIME WITH FILL FROM toDateTime64(1730098800, 3) - INTERVAL '7' HOUR TO toDateTime64(1730185140, 3) - INTERVAL '7' HOUR STEP toIntervalSecond(900) INTERPOLATE (interp)"
+ )
self.validate_identity(
"SELECT number, COUNT() OVER (PARTITION BY number % 3) AS partition_count FROM numbers(10) WINDOW window_name AS (PARTITION BY number) QUALIFY partition_count = 4 ORDER BY number"
)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 25.28 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.4.3
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@e92904e61ab3b14fe18d472df19311f9b014f6cc#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.4.3
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse"
] | [] | [
"tests/dialects/test_clickhouse.py::TestClickhouse::test_agg_functions",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_array_join",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_clickhouse_values",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_convert",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_cte",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_datetime_funcs",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ddl",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_drop_on_cluster",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_geom_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_grant",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_parameterization",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_signed_and_unsigned_types",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_ternary",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_timestr_to_time",
"tests/dialects/test_clickhouse.py::TestClickhouse::test_traverse_scope"
] | [] | MIT License | 20,106 | 496 | [
"sqlglot/generator.py",
"sqlglot/parser.py"
] |
|
radiocosmology__alpenhorn-200 | bcf90b12b56732fe79a7a83a5f7d7d7d834d5e5c | 2024-10-29 22:21:22 | bcf90b12b56732fe79a7a83a5f7d7d7d834d5e5c | diff --git a/alpenhorn/client/acq.py b/alpenhorn/client/acq.py
index 7972341..e877fbd 100644
--- a/alpenhorn/client/acq.py
+++ b/alpenhorn/client/acq.py
@@ -8,8 +8,6 @@ import peewee as pw
from ..db import ArchiveAcq, ArchiveFile, ArchiveFileCopy, StorageGroup, StorageNode
-from .connect_db import config_connect
-
RE_LOCK_FILE = re.compile(r"^\..*\.lock$")
diff --git a/alpenhorn/client/cli.py b/alpenhorn/client/cli.py
index 489f328..36a0687 100644
--- a/alpenhorn/client/cli.py
+++ b/alpenhorn/client/cli.py
@@ -1,11 +1,12 @@
"""Alpenhorn client interface."""
+import click
import datetime
import logging
-
-import click
import peewee as pw
+from .. import db
+from ..common.util import start_alpenhorn
from ..db import (
ArchiveAcq,
ArchiveFile,
@@ -15,18 +16,75 @@ from ..db import (
StorageNode,
StorageTransferAction,
)
-from alpenhorn import config, db, extensions, util
from . import acq, group, node, transport
-from .connect_db import config_connect
log = logging.getLogger(__name__)
+def _verbosity_from_cli(verbose: int, debug: int, quiet: int) -> int:
+ """Get client verbosity from command line.
+
+ Processes the --verbose, --debug and --quiet flags to determine
+ the requested verbosity."""
+
+ if quiet and verbose:
+ raise click.UsageError("Cannot use both --quiet and --verbose.")
+ if quiet and debug:
+ raise click.UsageError("Cannot use both --quiet and --debug.")
+
+ # Default verbosity is 3. --quiet decreases it. --verbose increases it.
+
+ # Max verbosity
+ if debug or verbose > 2:
+ return 5
+ # Min verbosity
+ if quiet > 2:
+ return 1
+
+ return 3 + verbose - quiet
+
+
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
-def cli():
[email protected](
+ "--conf",
+ "-c",
+ type=click.Path(exists=True),
+ help="Configuration file to read.",
+ default=None,
+ metavar="FILE",
+)
[email protected](
+ "--quiet",
+ "-q",
+ help="Decrease verbosity. May be specified mulitple times: "
+ "once suppresses normal client output, leaving only warning "
+ "and error message. A second use also suppresses warnings.",
+ count=True,
+)
[email protected](
+ "--verbose",
+ "-v",
+ help="Increase verbosity. May be specified mulitple times: "
+ "once enables informational messages. A second use also "
+ "enables debugging messages.",
+ count=True,
+)
[email protected](
+ "--debug",
+ help="Maximum verbosity.",
+ is_flag=True,
+ show_default=False,
+ default=False,
+)
+def cli(conf, quiet, verbose, debug):
"""Client interface for alpenhorn."""
+ # Initialise alpenhorn
+ start_alpenhorn(
+ conf, client=True, verbosity=_verbosity_from_cli(verbose, debug, quiet)
+ )
+
@cli.command()
def init():
@@ -36,11 +94,6 @@ def init():
specified in its configuration.
"""
- # Load the configuration and initialise the database connection
- config.load_config()
- extensions.load_extensions()
- db.config_connect()
-
# Create any alpenhorn core tables
core_tables = [
ArchiveAcq,
@@ -54,9 +107,6 @@ def init():
db.database_proxy.create_tables(core_tables, safe=True)
- # Register the acq/file type extensions
- extensions.register_type_extensions()
-
# TODO Create any tables registered by extensions
@@ -94,8 +144,6 @@ def sync(
archive (e.g. HPSS, transport disks).
"""
- config_connect()
-
try:
from_node = StorageNode.get(name=node_name)
except pw.DoesNotExist:
@@ -281,8 +329,6 @@ def status(all):
import tabulate
- config_connect()
-
# Data to fetch from the database (node name, total files, total size)
query_info = (
StorageNode.name,
diff --git a/alpenhorn/client/connect_db.py b/alpenhorn/client/connect_db.py
deleted file mode 100644
index bcce27a..0000000
--- a/alpenhorn/client/connect_db.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Alpenhorn client database initialization functions"""
-
-from alpenhorn import config, db, extensions
-
-
-def config_connect():
- """Load the config, start the database and register extensions."""
- # Load the configuration and initialise the database connection
- config.load_config()
- extensions.load_extensions()
- db.config_connect()
-
- # Register the acq/file type extensions
- extensions.register_type_extensions()
diff --git a/alpenhorn/client/group.py b/alpenhorn/client/group.py
index 49a0622..812bf4d 100644
--- a/alpenhorn/client/group.py
+++ b/alpenhorn/client/group.py
@@ -5,8 +5,6 @@ import peewee as pw
from ..db import StorageGroup, StorageNode
-from .connect_db import config_connect
-
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
diff --git a/alpenhorn/client/node.py b/alpenhorn/client/node.py
index 093d4dc..ba9559c 100644
--- a/alpenhorn/client/node.py
+++ b/alpenhorn/client/node.py
@@ -8,11 +8,9 @@ from collections import defaultdict
import click
import peewee as pw
+from .. import db
+from ..common import util
from ..db import ArchiveAcq, ArchiveFile, ArchiveFileCopy, StorageGroup, StorageNode
-import alpenhorn.util as util
-from alpenhorn import db
-
-from .connect_db import config_connect
RE_LOCK_FILE = re.compile(r"^\..*\.lock$")
diff --git a/alpenhorn/client/transport.py b/alpenhorn/client/transport.py
index 7561d8b..c60973c 100644
--- a/alpenhorn/client/transport.py
+++ b/alpenhorn/client/transport.py
@@ -10,7 +10,6 @@ import peewee as pw
from ..db import StorageGroup, StorageNode
from . import node
-from .connect_db import config_connect
# A few utility routines for dealing with filesystems
MAX_E2LABEL_LEN = 16
diff --git a/alpenhorn/common/config.py b/alpenhorn/common/config.py
index 9123c82..f4a6a31 100644
--- a/alpenhorn/common/config.py
+++ b/alpenhorn/common/config.py
@@ -6,6 +6,7 @@ Configuration file search order:
- `/etc/xdg/alpenhorn/alpenhorn.conf`
- `~/.config/alpenhorn/alpenhorn.conf`
- `ALPENHORN_CONFIG_FILE` environment variable
+- the path passed via `-c` or `--conf` on the command line
This is in order of increasing precedence, with options in later files
overriding those in earlier entries. Configuration is merged recursively by
@@ -135,6 +136,9 @@ Example config:
pull_bytes_per_second: 20000000
"""
+from __future__ import annotations
+
+from click import ClickException
import logging
import os
@@ -156,7 +160,7 @@ _default_config = {
}
-def load_config():
+def load_config(cli_conf: os.PathLike, client: bool) -> None:
"""Find and load the configuration from a file."""
global config
@@ -171,8 +175,12 @@ def load_config():
"~/.config/alpenhorn/alpenhorn.conf",
]
- if "ALPENHORN_CONFIG_FILE" in os.environ:
- config_files.append(os.environ["ALPENHORN_CONFIG_FILE"])
+ enviro_conf = os.environ.get("ALPENHORN_CONFIG_FILE", None)
+ if enviro_conf:
+ config_files.append(enviro_conf)
+
+ if cli_conf:
+ config_files.append(cli_conf)
any_exist = False
@@ -181,6 +189,13 @@ def load_config():
absfile = os.path.abspath(os.path.expanduser(os.path.expandvars(cfile)))
if not os.path.exists(absfile):
+ # Warn if a user-supplied config file is missing
+ if cfile == cli_conf:
+ log.warning(f"Config file {absfile} defined on command line not found.")
+ elif cfile == enviro_conf:
+ log.warning(
+ f"Config file {absfile} defined by ALPENHORN_CONFIG_FILE not found."
+ )
continue
any_exist = True
@@ -194,21 +209,15 @@ def load_config():
config = merge_dict_tree(config, conf)
if not any_exist:
- raise RuntimeError("No configuration files available.")
-
-
-class ConfigClass(object):
- """A base for classes that can be configured from a dictionary.
-
- Note that this configures the class itself, not instances of the class.
- """
+ if client:
+ exc = ClickException
+ else:
+ exc = RuntimeError
- @classmethod
- def set_config(cls, configdict):
- """Configure the class from the supplied `configdict`."""
+ raise exc("No configuration files available.")
-def merge_dict_tree(a, b):
+def merge_dict_tree(a: dict, b: dict) -> dict:
"""Merge two dictionaries recursively.
The following rules applied:
diff --git a/alpenhorn/common/logger.py b/alpenhorn/common/logger.py
index 5d7abde..361d62b 100644
--- a/alpenhorn/common/logger.py
+++ b/alpenhorn/common/logger.py
@@ -1,23 +1,45 @@
"""Set up logging for alpenhorn.
-This module provides two important functions:
+Basic Configruation
+-------------------
-* `init_logging()` should be called as soon as possible after program start
- to turn on logging to standard error. Any log messages produced before
- this call are discarded.
+Both client and server should call the `init_logging()` function as soon as
+possible after program start to turn on logging to standard error. Any log
+messages produced before this call are discarded.
-* `configure_logging()` should be called immediately after the alpenhorn
- config has been loaded. It will re-configure the alpenhorn logger
- based on the alpenhorn configuration, including starting file or syslog-
- based logging, if requested.
-alpenhorn buffers log messages emitted between these two calls and will flush
-them to any additonal log destinations started by `configure_logging` so that
-these messages are not lost. (This is in addiiton to the messages being sent
-immediately to standard error, which always happens.)
+Server Logging
+--------------
+
+The server should immediately follow the loading of the alpenhorn config
+with a call to `configure_logging()` which will re-configure the alpenhorn
+logger based on the alpenhorn configuration, including starting file or syslog-
+based logging, if requested. The client should not call this function.
+
+The alpenhorn server buffers log messages emitted between the `init_logging`
+and `configure_logging` calls and will flush them to any additonal log
+destinations started by `configure_logging` so that these messages are not lost.
+(This is in addiiton to the messages being sent immediately to standard error,
+which always happens.)
Note also that between the two calls, the log level of the root logger is
set to DEBUG.
+
+
+Client Logging
+--------------
+
+The client does not support file or syslog logging, so should _not_ call
+`configure_logging`. Instead, the client supports five verbosity levels:
+
+ 1. No output on standard out. Error messages on standard error.
+ 2. No output on standard out. Warning and error on standard error.
+ 3. Client ouput on standard out. Warning and error messages on standard error.
+ 4. Client ouput on standard out. Info, warning, errors on standard error.
+ 5. Client ouput on standard out. Debug, info, warning, errors on standard error.
+
+The initial verbosity can be specified in the `init_logging` call. The
+default verbosity is 3. May be changed at runtime by calling `set_verbosity`.
"""
import socket
@@ -34,15 +56,22 @@ try:
except ImportError:
RotatingFileHandler = logging.handlers.RotatingFileHandler
-# The log format. Used by the stderr log and any other log destinations
-log_fmt = logging.Formatter(
+# The log formats. Used by the stderr log and any other log destinations
+client_fmt = logging.Formatter(
+ "%(levelname)s >> %(message)s",
+ "%b %d %H:%M:%S",
+)
+server_fmt = logging.Formatter(
"%(asctime)s %(levelname)s >> [%(threadName)s] %(message)s",
"%b %d %H:%M:%S",
)
-# initialised by init_logging
+# initialised by init_logging; server-only
log_buffer = None
+# Client output suppression.
+_client_echo = True
+
class StartupHandler(logging.handlers.BufferingHandler):
"""Start-up logging handler for alpenhorn.
@@ -103,32 +132,83 @@ class StartupHandler(logging.handlers.BufferingHandler):
self.release()
-def init_logging() -> None:
+def echo(*args, **kwargs) -> None:
+ """Client wrapper for click.echo.
+
+ Suppresses output when verbosity is less than three.
+ """
+ if client_echo:
+ return click.echo(*args, **kwargs)
+
+
+def set_verbosity(verbosity: int) -> None:
+ """Set client verbosity.
+
+ Sets the log level of the root logger based on the
+ requested verbosity level.
+ """
+
+ # Levels 2 and 3 are the same.
+ verbosity_to_level = {
+ 1: logging.ERROR,
+ 2: logging.WARNING,
+ 3: logging.WARNING,
+ 4: logging.INFO,
+ 5: logging.DEBUG,
+ }
+
+ if verbosity not in verbosity_to_level:
+ raise ValueError(f"Bad verbosity: {verbosity}")
+
+ root_logger = logging.getLogger()
+ root_logger.setLevel(verbosity_to_level[verbosity])
+
+ # Suppress normal client output at low verbosity
+ global _client_echo
+ _client_echo = verbosity >= 3
+
+
+def init_logging(client: bool, verbosity: int | None = None) -> None:
"""Initialise the logger.
This function is called before the config is read. It sets up logging to
standard error and also starts a log buffer where messages accumulate
before the logging facilities defined by the configuration are started.
+
+ Parameters
+ ----------
+ client : bool
+ Is the alpenhorn client being initialised?
+ verbosity : int
+ For clients, the verbosity level to use. Ignored for servers.
"""
# This is the stderr logger. It is always present, regardless of logging config
log_stream = logging.StreamHandler()
- log_stream.setFormatter(log_fmt)
-
- # This is the start-up logger. It buffers messages in memory until configure_logging()
- # is called, at which point the buffered messages are flushed to a file, if one was
- # opened, so that messages logged before the start of file logging are recorded,
- # and then this handler is shut down.
- global log_buffer
- log_buffer = StartupHandler(10000)
+ log_stream.setFormatter(client_fmt if client else server_fmt)
# Set up initial logging
root_logger = logging.getLogger()
- root_logger.setLevel(logging.DEBUG)
- root_logger.addHandler(log_buffer)
root_logger.addHandler(log_stream)
- root_logger.info("Alpenhorn start.")
+ if client:
+ if verbosity is None:
+ verbosity = 3
+ set_verbosity(verbosity)
+ else:
+ root_logger.setLevel(logging.DEBUG)
+
+ # This is the start-up logger for the server. It buffers messages in memory
+ # until configure_logging() is called, at which point the buffered messages
+ # are flushed to a file, if one was opened, so that messages logged before
+ # the start of file logging are recorded, and then this handler is shut down.
+ global log_buffer
+ log_buffer = StartupHandler(10000)
+
+ root_logger.addHandler(log_buffer)
+
+ # Record server start
+ root_logger.info("Alpenhorn start.")
def _max_bytes_from_config(max_bytes: str | float | int) -> int:
@@ -223,8 +303,8 @@ def configure_sys_logging(syslog_config: dict) -> logging.handlers.SysLogHandler
)
# Format handler
- global log_fmt
- handler.setFormatter(log_fmt)
+ global server_fmt
+ handler.setFormatter(server_fmt)
# Log the start of syslogging to the alpenhorn logger.
# We do this _before_ adding the file handler to prevent
@@ -308,8 +388,8 @@ def configure_file_logging(file_config: dict) -> logging.Handler:
how = ""
# Format handler
- global log_fmt
- handler.setFormatter(log_fmt)
+ global server_fmt
+ handler.setFormatter(server_fmt)
# Log the start of file logging to the alpenhorn logger.
# We do this _before_ adding the file handler to prevent
diff --git a/alpenhorn/common/util.py b/alpenhorn/common/util.py
index 1a28ca8..a1246c3 100644
--- a/alpenhorn/common/util.py
+++ b/alpenhorn/common/util.py
@@ -6,11 +6,39 @@ import socket
import hashlib
import logging
-from . import config
+from . import config, extensions, logger
log = logging.getLogger(__name__)
+def start_alpenhorn(
+ cli_conf: str | None, client: bool, verbosity: int | None = None
+) -> None:
+ """Initialise alpenhorn
+
+ Parameters
+ ----------
+ cli_conf : str or None
+ The config file given on the command line, if any.
+ client : bool
+ Is the alpenhorn client being initialised?
+ verbosity : int, optional
+ For clients, the initial verbosity level. Ignored for servers.
+ """
+ # Initialise logging
+ logger.init_logging(client=client, verbosity=verbosity)
+
+ # Load the configuration for alpenhorn
+ config.load_config(cli_conf, client=client)
+
+ # Set up server logging based on config
+ if not client:
+ logger.configure_logging()
+
+ # Load alpenhorn extensions
+ extensions.load_extensions()
+
+
def run_command(
cmd: list[str], timeout: float | None = None, **kwargs
) -> tuple(int | None, str, str):
diff --git a/alpenhorn/server/service.py b/alpenhorn/server/service.py
index d1155fd..3f9b5df 100755
--- a/alpenhorn/server/service.py
+++ b/alpenhorn/server/service.py
@@ -5,7 +5,8 @@ import click
import logging
from .. import db
-from ..common import config, extensions, logger
+from ..common import config
+from ..common.util import start_alpenhorn
from ..scheduler import FairMultiFIFOQueue, pool
from . import auto_import, update
@@ -23,20 +24,19 @@ sys.excepthook = log_exception
@click.command()
-def cli():
[email protected](
+ "--conf",
+ "-c",
+ type=click.Path(exists=True),
+ help="Configuration file to read.",
+ default=None,
+ metavar="FILE",
+)
+def cli(conf):
"""Alpenhorn data management service."""
- # Initialise logging
- logger.init_logging()
-
- # Load the configuration for alpenhorn
- config.load_config()
-
- # Set up logging based on config
- logger.configure_logging()
-
- # Load alpenhorn extensions
- extensions.load_extensions()
+ # Initialise alpenhorn
+ start_alpenhorn(conf, client=False)
# Connect to the database
db.connect()
| Refactor duplicate config+db+extension code into a single function
We have the same code for to load the config, start the database, and register extensions duplicated in a couple of places. (Right now it's in `alpenhorn.client` in `_init_config_db` and `init`, as well as `alpenhorn.service` in `cli`.) Can we refactor it into a function in `alpenhorn/__init__`?
Leaving out the comments, this is the same set of steps that always (should? must?) happen:
```python
config.load_config()
extensions.load_extensions()
db.config_connect()
extensions.register_type_extensions()
``` | radiocosmology/alpenhorn | diff --git a/tests/common/test_config.py b/tests/common/test_config.py
index 87b1afc..b3a5f20 100644
--- a/tests/common/test_config.py
+++ b/tests/common/test_config.py
@@ -16,7 +16,7 @@ def test_no_config():
# Check that alpenhorn fails if it has no appropriate configuration
with pytest.raises(RuntimeError) as excinfo:
- config.load_config()
+ config.load_config(None, False)
assert "No configuration" in str(excinfo.value)
@@ -26,7 +26,7 @@ def test_config_env(fs, monkeypatch):
fs.create_file("/test/from/env/test.yaml", contents="hello: test\n")
monkeypatch.setenv("ALPENHORN_CONFIG_FILE", "/test/from/env/test.yaml")
- config.load_config()
+ config.load_config(None, False)
assert config.config == merge_dict(config._default_config, {"hello": "test"})
@@ -34,25 +34,25 @@ def test_precendence(fs, monkeypatch):
# Test the precedence of configuration imported from files is correct
fs.create_file("/etc/alpenhorn/alpenhorn.conf", contents="hello: test\n")
- config.load_config()
+ config.load_config(None, False)
assert config.config == merge_dict(config._default_config, {"hello": "test"})
fs.create_file("/etc/xdg/alpenhorn/alpenhorn.conf", contents="hello: test2\n")
- config.load_config()
+ config.load_config(None, False)
assert config.config == merge_dict(config._default_config, {"hello": "test2"})
fs.create_file(
os.path.expanduser("~/.config/alpenhorn/alpenhorn.conf"),
contents="hello: test3\nmeh: embiggens",
)
- config.load_config()
+ config.load_config(None, False)
assert config.config == merge_dict(
config._default_config, {"hello": "test3", "meh": "embiggens"}
)
fs.create_file("/test/from/env/test.yaml", contents="hello: test4\n")
monkeypatch.setenv("ALPENHORN_CONFIG_FILE", "/test/from/env/test.yaml")
- config.load_config()
+ config.load_config(None, False)
assert config.config == merge_dict(
config._default_config, {"hello": "test4", "meh": "embiggens"}
)
diff --git a/tests/conftest.py b/tests/conftest.py
index 4f7cf45..39cd250 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -74,7 +74,7 @@ def logger():
Yields alpenhorn.common.logger.
"""
- alpenhorn.common.logger.init_logging()
+ alpenhorn.common.logger.init_logging(False)
yield alpenhorn.common.logger
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_removed_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 9
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/radiocosmology/alpenhorn.git@bcf90b12b56732fe79a7a83a5f7d7d7d834d5e5c#egg=alpenhorn
bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chimedb @ git+https://github.com/chime-experiment/chimedb.git@d82f48eb0599393723e7ee5d756aff6c6830db32
click==8.1.8
concurrent-log-handler==0.9.25
cryptography==44.0.2
docker==7.1.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mysql-connector-python==8.0.29
packaging==24.2
paramiko==3.5.1
peewee==3.17.9
pluggy==1.5.0
portalocker==3.1.1
protobuf==6.30.2
pycparser==2.22
pyfakefs==5.8.0
PyNaCl==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
sshtunnel==0.4.0
tabulate==0.9.0
tomli==2.2.1
ujson==5.10.0
urllib3==2.3.0
watchdog==6.0.0
| name: alpenhorn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alpenhorn==2.0.0a1
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chimedb==24.8.0.post2+git.d82f48eb
- click==8.1.8
- concurrent-log-handler==0.9.25
- cryptography==44.0.2
- docker==7.1.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mysql-connector-python==8.0.29
- packaging==24.2
- paramiko==3.5.1
- peewee==3.17.9
- pluggy==1.5.0
- portalocker==3.1.1
- protobuf==6.30.2
- pycparser==2.22
- pyfakefs==5.8.0
- pynacl==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- sshtunnel==0.4.0
- tabulate==0.9.0
- tomli==2.2.1
- ujson==5.10.0
- urllib3==2.3.0
- watchdog==6.0.0
prefix: /opt/conda/envs/alpenhorn
| [
"tests/common/test_config.py::test_no_config",
"tests/common/test_config.py::test_config_env",
"tests/common/test_config.py::test_precendence"
] | [] | [
"tests/common/test_config.py::test_merge"
] | [] | MIT License | 20,107 | 4,844 | [
"alpenhorn/client/acq.py",
"alpenhorn/client/cli.py",
"alpenhorn/client/connect_db.py",
"alpenhorn/client/group.py",
"alpenhorn/client/node.py",
"alpenhorn/client/transport.py",
"alpenhorn/common/config.py",
"alpenhorn/common/logger.py",
"alpenhorn/common/util.py",
"alpenhorn/server/service.py"
] |
|
fitbenchmarking__fitbenchmarking-1353 | c451994dba5cf826520ab10f894f3b1260b29215 | 2024-10-30 02:00:22 | 7fcd2778e2b9a7a86ce36c96078e3f6712d5cd12 | coveralls:
[](https://coveralls.io/builds/70608100)
coverage: 90.229% (+0.02%) from 90.214%
when pulling **96c730308f684b458b329887588f6d6847ac12be on RabiyaF:1232-fixed-multifit-bug**
into **cbbfa71754bb236973ac80ffdf357cf1229ca98c on fitbenchmarking:master**.
| diff --git a/fitbenchmarking/parsing/fitting_problem.py b/fitbenchmarking/parsing/fitting_problem.py
index 788a01a8..c962eb64 100644
--- a/fitbenchmarking/parsing/fitting_problem.py
+++ b/fitbenchmarking/parsing/fitting_problem.py
@@ -296,12 +296,11 @@ class FittingProblem:
"""
if parameter_set not in self._ini_y:
params = self.starting_values[parameter_set].values()
- if self.multifit:
- self._ini_y[parameter_set] = [
- self.eval_model(params=params, x=x) for x in self.data_x
- ]
- else:
- self._ini_y[parameter_set] = self.eval_model(params=params)
+ self._ini_y[parameter_set] = (
+ self.eval_model(params=params, x=self.data_x[0])
+ if self.multifit
+ else self.eval_model(params=params)
+ )
return self._ini_y[parameter_set]
| Multifit and make plots produces a bug
**Description of the error**
When doing multifit problems with make plots set to true I get the error message
```
No registered converter was able to produce a C++ rvalue of type double from this Python object of type numpy.ndarray
Benchmark problems: 100%|█████████████████████████████████████████████████| 2/2 [00:00<00:00, 3.37Benchmark problem/s]
Producing output for the Mantid_MultiFit problem set
Unknown exception. Exiting.
All arrays must be of the same length
```
**Describe the expected result**
I would expect either for it to work or to give a clear error about plots not working with multifit (preferably at the start instead of the end)
| fitbenchmarking/fitbenchmarking | diff --git a/fitbenchmarking/parsing/tests/test_fitting_problem.py b/fitbenchmarking/parsing/tests/test_fitting_problem.py
index b57e2a1d..b479c775 100644
--- a/fitbenchmarking/parsing/tests/test_fitting_problem.py
+++ b/fitbenchmarking/parsing/tests/test_fitting_problem.py
@@ -3,8 +3,10 @@ Test file to test the fitting_problem file.
"""
from unittest import TestCase
+from unittest.mock import patch
import numpy as np
+from parameterized import parameterized
from fitbenchmarking.parsing.fitting_problem import FittingProblem
from fitbenchmarking.utils import exceptions
@@ -160,53 +162,42 @@ class TestFittingProblem(TestCase):
Tests that correct data gives the expected result
"""
fitting_problem = FittingProblem(self.options)
- x_data = np.array([-0.5, 0.0, 1.0, 0.5, 1.5, 2.0, 2.5, 3.0, 4.0])
- y_data = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
- e_data = np.array([1.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 9.0])
- start_x = 0.5
- end_x = 2.5
+ fitting_problem.data_x = np.array(
+ [-0.5, 0.0, 1.0, 0.5, 1.5, 2.0, 2.5, 3.0, 4.0]
+ )
+ fitting_problem.data_y = np.array(
+ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
+ )
+ fitting_problem.data_e = np.array(
+ [1.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 9.0]
+ )
+ fitting_problem.start_x = 0.5
+ fitting_problem.end_x = 2.5
+
expected_x_data = np.array([0.5, 1.0, 1.5, 2.0, 2.5])
expected_y_data = np.array([3.0, 2.0, 4.0, 5.0, 6.0])
expected_e_data = np.array([40.0, 30.0, 50.0, 60.0, 70.0])
- fitting_problem.data_x = x_data
- fitting_problem.data_y = y_data
- fitting_problem.data_e = e_data
- fitting_problem.start_x = start_x
- fitting_problem.end_x = end_x
-
fitting_problem.correct_data()
+
+ sort = fitting_problem.sorted_index
self.assertTrue(
- np.isclose(
- fitting_problem.data_x[fitting_problem.sorted_index],
- expected_x_data,
- ).all()
+ (fitting_problem.data_x[sort] == expected_x_data).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_y[fitting_problem.sorted_index],
- expected_y_data,
- ).all()
+ (fitting_problem.data_y[sort] == expected_y_data).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_e[fitting_problem.sorted_index],
- expected_e_data,
- ).all()
+ (fitting_problem.data_e[sort] == expected_e_data).all()
)
+
self.options.cost_func_type = ["nlls"]
fitting_problem.correct_data()
self.assertTrue(
- np.isclose(
- fitting_problem.data_x[fitting_problem.sorted_index],
- expected_x_data,
- ).all()
+ (fitting_problem.data_x[sort] == expected_x_data).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_y[fitting_problem.sorted_index],
- expected_y_data,
- ).all()
+ (fitting_problem.data_y[sort] == expected_y_data).all()
)
self.assertIs(fitting_problem.data_e, None)
@@ -216,23 +207,24 @@ class TestFittingProblem(TestCase):
"""
fitting_problem = FittingProblem(self.options)
fitting_problem.multifit = True
- x_data = [
+ fitting_problem.data_x = [
np.array([-0.5, 0.0, 1.0, 0.5, 1.5, 2.0, 2.5, 3.0, 4.0]),
np.array([-0.5, 0.0, 1.0, 0.5, 1.4, 2.0, 2.5, 3.0, 4.0]),
np.array([-0.5, 0.0, 1.0, 0.5, 1.7, 2.0, 2.5, 3.0, 4.0]),
]
- y_data = [
+ fitting_problem.data_y = [
np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
np.array([0.0, 1.0, 2.0, 3.0, 24.0, 5.0, 6.0, 7.0, 8.0]),
np.array([0.0, 1.0, 2.8, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
]
- e_data = [
+ fitting_problem.data_e = [
np.array([1.0, 20.0, 30.0, 40.0, 50.0, 60.0, 1.0, 6.0, 9.0]),
np.array([1.0, 20.0, 30.0, 40.0, 50.0, 60.0, 1.0, 6.0, 9.0]),
np.array([1.0, 20.0, 30.0, 40.0, 50.0, 60.0, 1.0, 6.0, 9.0]),
]
- start_x = [0.5, 1.1, 0.0]
- end_x = [2.5, 2.6, 1.0]
+ fitting_problem.start_x = [0.5, 1.1, 0.0]
+ fitting_problem.end_x = [2.5, 2.6, 1.0]
+
expected_x_data = [
np.array([0.5, 1.0, 1.5, 2.0, 2.5]),
np.array([1.4, 2.0, 2.5]),
@@ -249,46 +241,46 @@ class TestFittingProblem(TestCase):
np.array([20.0, 40.0, 30.0]),
]
- fitting_problem.data_x = x_data
- fitting_problem.data_y = y_data
- fitting_problem.data_e = e_data
- fitting_problem.start_x = start_x
- fitting_problem.end_x = end_x
-
fitting_problem.correct_data()
- for i in range(3):
+
+ for ix, sort in enumerate(fitting_problem.sorted_index):
self.assertTrue(
- np.isclose(
- fitting_problem.data_x[i][fitting_problem.sorted_index[i]],
- expected_x_data[i],
- ).all()
+ (fitting_problem.data_x[ix][sort] == expected_x_data[ix]).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_y[i][fitting_problem.sorted_index[i]],
- expected_y_data[i],
- ).all()
+ (fitting_problem.data_y[ix][sort] == expected_y_data[ix]).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_e[i][fitting_problem.sorted_index[i]],
- expected_e_data[i],
- ).all()
+ (fitting_problem.data_e[ix][sort] == expected_e_data[ix]).all()
)
self.options.cost_func_type = ["nlls"]
fitting_problem.correct_data()
- for i in range(3):
+ for ix, sort in enumerate(fitting_problem.sorted_index):
self.assertTrue(
- np.isclose(
- fitting_problem.data_x[i][fitting_problem.sorted_index[i]],
- expected_x_data[i],
- ).all()
+ (fitting_problem.data_x[ix][sort] == expected_x_data[ix]).all()
)
self.assertTrue(
- np.isclose(
- fitting_problem.data_y[i][fitting_problem.sorted_index[i]],
- expected_y_data[i],
- ).all()
+ (fitting_problem.data_y[ix][sort] == expected_y_data[ix]).all()
)
- self.assertIs(fitting_problem.data_e[i], None)
+ self.assertIs(fitting_problem.data_e[ix], None)
+
+ @parameterized.expand(
+ [
+ (True, [np.array([1, 2]), np.array([3, 4])], ["params", "x"]),
+ (False, np.array([1, 2]), ["params"]),
+ ]
+ )
+ @patch("fitbenchmarking.parsing.fitting_problem.FittingProblem.eval_model")
+ def test_ini_y_args(self, multifit, data_x, args, mock):
+ """
+ Tests ini_y calls eval_model using the right args.
+ """
+ fitting_problem = FittingProblem(self.options)
+ fitting_problem.multifit = multifit
+ fitting_problem.data_x = data_x
+ fitting_problem.starting_values = [{0: "0"}]
+
+ fitting_problem.ini_y()
+ self.assertEqual(mock.call_count, 1)
+ self.assertEqual(list(mock.call_args[1].keys()), args)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"coveralls",
"coverage[toml]",
"ruff",
"pandas",
"iminuit",
"pre-commit",
"parameterized"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==1.3.0
blinker==1.9.0
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
codecarbon==2.6.0
configparser==7.2.0
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
dash==3.0.1
distlib==0.3.9
docopt==0.6.2
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
-e git+https://github.com/fitbenchmarking/fitbenchmarking.git@c451994dba5cf826520ab10f894f3b1260b29215#egg=FitBenchmarking
Flask==3.0.3
fonttools==4.56.0
identify==2.6.9
idna==3.10
iminuit==2.30.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
itsdangerous==2.2.0
Jinja2==3.1.6
kiwisolver==1.4.7
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdurl==0.1.2
narwhals==1.32.0
nest-asyncio==1.6.0
nodeenv==1.9.1
numpy==2.0.2
nvidia-ml-py==12.570.86
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
parameterized==0.9.0
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
psutil==7.0.0
py-cpuinfo==9.0.0
Pygments==2.19.1
pynvml==12.0.0
pyparsing==3.2.3
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
questionary==2.1.0
RapidFuzz==3.12.2
requests==2.32.3
retrying==1.3.4
rich==14.0.0
ruff==0.11.2
scipy==1.13.1
shellingham==1.5.4
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tqdm==4.67.1
typer==0.15.2
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
wcwidth==0.2.13
Werkzeug==3.0.6
zipp==3.21.0
| name: fitbenchmarking
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- arrow==1.3.0
- blinker==1.9.0
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- codecarbon==2.6.0
- configparser==7.2.0
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- dash==3.0.1
- distlib==0.3.9
- docopt==0.6.2
- docutils==0.21.2
- filelock==3.18.0
- fitbenchmarking==0.0.0.dev1
- flask==3.0.3
- fonttools==4.56.0
- identify==2.6.9
- idna==3.10
- iminuit==2.30.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- itsdangerous==2.2.0
- jinja2==3.1.6
- kiwisolver==1.4.7
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdurl==0.1.2
- narwhals==1.32.0
- nest-asyncio==1.6.0
- nodeenv==1.9.1
- numpy==2.0.2
- nvidia-ml-py==12.570.86
- pandas==2.2.3
- parameterized==0.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- pre-commit==4.2.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- psutil==7.0.0
- py-cpuinfo==9.0.0
- pygments==2.19.1
- pynvml==12.0.0
- pyparsing==3.2.3
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- questionary==2.1.0
- rapidfuzz==3.12.2
- requests==2.32.3
- retrying==1.3.4
- rich==14.0.0
- ruff==0.11.2
- scipy==1.13.1
- shellingham==1.5.4
- six==1.17.0
- tqdm==4.67.1
- typer==0.15.2
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- wcwidth==0.2.13
- werkzeug==3.0.6
- zipp==3.21.0
prefix: /opt/conda/envs/fitbenchmarking
| [
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_ini_y_args_0"
] | [] | [
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_correct_data_multi_fit",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_correct_data_single_fit",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_eval_model_correct_evaluation",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_eval_model_raise_error",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_fitting_problem_str",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_get_function_params",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_ini_y_args_1",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_set_value_ranges",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_set_value_ranges_incorrect_names",
"fitbenchmarking/parsing/tests/test_fitting_problem.py::TestFittingProblem::test_verify_problem"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,109 | 249 | [
"fitbenchmarking/parsing/fitting_problem.py"
] |
tobymao__sqlglot-4328 | 5c1b1f43014967f6853752ba8d0899757a3efcd5 | 2024-10-31 10:18:55 | c1456d07097c42a2ba2078ad30a8afe4cc89597d | VaggelisD: I agree, as discussed in Slack the time units are not handled very well currently (e.g code duplication between `exp.TimeUnit` and `exp.DateTrunc`, unable to override per dialect etc).
However, I entertained the idea of refactoring and it seems like a sizable task; We'd have to move part of that code to `Dialect` and rework `exp.TimeUnit` to be normalized during construction per dialect, which I'm assuming would have to go through builders. This will disrupt most of the unit-based Expression parsing today afaict, right?
It might be worth exploring asap if more edge cases like these creep up. | diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
index 982907d0..1025a075 100644
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -121,7 +121,9 @@ class Oracle(Dialect):
"TO_TIMESTAMP": build_formatted_time(exp.StrToTime, "oracle"),
"TO_DATE": build_formatted_time(exp.StrToDate, "oracle"),
"TRUNC": lambda args: exp.DateTrunc(
- unit=seq_get(args, 1) or exp.Literal.string("DD"), this=seq_get(args, 0)
+ unit=seq_get(args, 1) or exp.Literal.string("DD"),
+ this=seq_get(args, 0),
+ unabbreviate=False,
),
}
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 0924fd53..540d5331 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -1087,3 +1087,12 @@ class Snowflake(Dialect):
return self.func(
f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
)
+
+ def timestampsub_sql(self, expression: exp.TimestampSub):
+ return self.sql(
+ exp.TimestampAdd(
+ this=expression.this,
+ expression=expression.expression * -1,
+ unit=expression.unit,
+ )
+ )
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 9a9073a4..794f6d12 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5503,11 +5503,17 @@ class DateTrunc(Func):
arg_types = {"unit": True, "this": True, "zone": False}
def __init__(self, **args):
+ # Across most dialects it's safe to unabbreviate the unit (e.g. 'Q' -> 'QUARTER') except Oracle
+ # https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
+ unabbreviate = args.pop("unabbreviate", True)
+
unit = args.get("unit")
if isinstance(unit, TimeUnit.VAR_LIKE):
- args["unit"] = Literal.string(
- (TimeUnit.UNABBREVIATED_UNIT_NAME.get(unit.name) or unit.name).upper()
- )
+ unit_name = unit.name.upper()
+ if unabbreviate and unit_name in TimeUnit.UNABBREVIATED_UNIT_NAME:
+ unit_name = TimeUnit.UNABBREVIATED_UNIT_NAME[unit_name]
+
+ args["unit"] = Literal.string(unit_name)
elif isinstance(unit, Week):
unit.set("this", Literal.string(unit.this.name.upper()))
| unit generated in oracle sql is incorrect when input is "Q"
```
In [6]: import sqlglot as sg, sqlglot.expressions as sge
In [7]: sg.func("trunc", sg.to_identifier("x"), sge.convert("Q")).sql("oracle")
Out[7]: "TRUNC(x, 'Q')"
In [8]: sg.func("trunc", sg.to_identifier("x"), sge.convert("Q"), dialect="oracle").sql("oracle")
Out[8]: "TRUNC(x, 'QUARTER')"
```
`ROUND`/`TRUNC` docs showing units: https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html#GUID-8E10AB76-21DA-490F-A389-023B648DDEF8 | tobymao/sqlglot | diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index cf38f95e..96c5a4e4 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -697,6 +697,14 @@ LANGUAGE js AS
write={
"bigquery": "SELECT TIMESTAMP_SUB(CAST('2008-12-25 15:30:00+00' AS TIMESTAMP), INTERVAL '10' MINUTE)",
"mysql": "SELECT DATE_SUB(TIMESTAMP('2008-12-25 15:30:00+00'), INTERVAL '10' MINUTE)",
+ "snowflake": "SELECT TIMESTAMPADD(MINUTE, '10' * -1, CAST('2008-12-25 15:30:00+00' AS TIMESTAMPTZ))",
+ },
+ )
+ self.validate_all(
+ 'SELECT TIMESTAMP_SUB(TIMESTAMP "2008-12-25 15:30:00+00", INTERVAL col MINUTE)',
+ write={
+ "bigquery": "SELECT TIMESTAMP_SUB(CAST('2008-12-25 15:30:00+00' AS TIMESTAMP), INTERVAL col MINUTE)",
+ "snowflake": "SELECT TIMESTAMPADD(MINUTE, col * -1, CAST('2008-12-25 15:30:00+00' AS TIMESTAMPTZ))",
},
)
self.validate_all(
diff --git a/tests/dialects/test_oracle.py b/tests/dialects/test_oracle.py
index 36ce5d02..0784810a 100644
--- a/tests/dialects/test_oracle.py
+++ b/tests/dialects/test_oracle.py
@@ -119,13 +119,6 @@ class TestOracle(Validator):
"tsql": UnsupportedError,
},
)
- self.validate_all(
- "TRUNC(SYSDATE, 'YEAR')",
- write={
- "clickhouse": "DATE_TRUNC('YEAR', CURRENT_TIMESTAMP())",
- "oracle": "TRUNC(SYSDATE, 'YEAR')",
- },
- )
self.validate_all(
"SELECT * FROM test WHERE MOD(col1, 4) = 3",
read={
@@ -632,3 +625,20 @@ WHERE
self.validate_identity("GRANT UPDATE, TRIGGER ON TABLE t TO anita, zhi")
self.validate_identity("GRANT EXECUTE ON PROCEDURE p TO george")
self.validate_identity("GRANT USAGE ON SEQUENCE order_id TO sales_role")
+
+ def test_datetrunc(self):
+ self.validate_all(
+ "TRUNC(SYSDATE, 'YEAR')",
+ write={
+ "clickhouse": "DATE_TRUNC('YEAR', CURRENT_TIMESTAMP())",
+ "oracle": "TRUNC(SYSDATE, 'YEAR')",
+ },
+ )
+
+ # Make sure units are not normalized e.g 'Q' -> 'QUARTER' and 'W' -> 'WEEK'
+ # https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
+ for unit in (
+ "'Q'",
+ "'W'",
+ ):
+ self.validate_identity(f"TRUNC(x, {unit})")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 3
} | 25.28 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.4.3
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@5c1b1f43014967f6853752ba8d0899757a3efcd5#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- distlib==0.3.9
- duckdb==1.2.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.4.3
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_oracle.py::TestOracle::test_datetrunc"
] | [] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_convert",
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_gap_fill",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_inline_constructor",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_mod",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_null_ordering",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_range_type",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unnest",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings",
"tests/dialects/test_oracle.py::TestOracle::test_connect_by",
"tests/dialects/test_oracle.py::TestOracle::test_grant",
"tests/dialects/test_oracle.py::TestOracle::test_hints",
"tests/dialects/test_oracle.py::TestOracle::test_join_marker",
"tests/dialects/test_oracle.py::TestOracle::test_json_functions",
"tests/dialects/test_oracle.py::TestOracle::test_json_table",
"tests/dialects/test_oracle.py::TestOracle::test_match_recognize",
"tests/dialects/test_oracle.py::TestOracle::test_multitable_inserts",
"tests/dialects/test_oracle.py::TestOracle::test_oracle",
"tests/dialects/test_oracle.py::TestOracle::test_query_restrictions",
"tests/dialects/test_oracle.py::TestOracle::test_xml_table"
] | [] | MIT License | 20,125 | 724 | [
"sqlglot/dialects/oracle.py",
"sqlglot/dialects/snowflake.py",
"sqlglot/expressions.py"
] |
lepture__mistune-393 | 0772c78bc0a62771768278263ef740345f58c0f0 | 2024-10-31 15:31:28 | 0772c78bc0a62771768278263ef740345f58c0f0 | diff --git a/src/mistune/inline_parser.py b/src/mistune/inline_parser.py
index 21b04c1..97423db 100644
--- a/src/mistune/inline_parser.py
+++ b/src/mistune/inline_parser.py
@@ -19,7 +19,7 @@ from .helpers import (
parse_link_text,
unescape_char,
)
-from .util import escape, escape_url, unikey
+from .util import escape_url, unikey
PAREN_END_RE = re.compile(r'\s*\)')
@@ -310,7 +310,7 @@ class InlineParser(Parser[InlineState]):
if len(code.strip()):
if code.startswith(' ') and code.endswith(' '):
code = code[1:-1]
- state.append_token({'type': 'codespan', 'raw': escape(code)})
+ state.append_token({'type': 'codespan', 'raw': code})
return end_pos
else:
state.append_token({'type': 'text', 'raw': marker})
diff --git a/src/mistune/renderers/html.py b/src/mistune/renderers/html.py
index 0d999a7..a8d2c47 100644
--- a/src/mistune/renderers/html.py
+++ b/src/mistune/renderers/html.py
@@ -91,7 +91,7 @@ class HTMLRenderer(BaseRenderer):
return s + ' />'
def codespan(self, text: str) -> str:
- return '<code>' + text + '</code>'
+ return '<code>' + escape_text(text) + '</code>'
def linebreak(self) -> str:
return '<br />\n'
| Certain characters in inline code incorrectly parsed (e.g., `&`)
MWE:
```python
import mistune
from mistune.core import BlockState
markdown = mistune.create_markdown(renderer="ast")
md = r"`&<>`"
tokens = markdown(md)
print(tokens)
```
Output:
```python
[{'type': 'paragraph', 'children': [{'type': 'codespan', 'raw': '&<>'}]}]
``` | lepture/mistune | diff --git a/tests/test_misc.py b/tests/test_misc.py
index f57c53b..5e21a1a 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -77,7 +77,7 @@ class TestMiscCases(TestCase):
def test_ast_output(self):
md = mistune.create_markdown(escape=False, renderer=None)
- text = '# h1\n\nfoo **bar**'
+ text = '# h1\n\nfoo **bar**\n\n`&<>"`'
result = md(text)
expected = [
{
@@ -94,6 +94,13 @@ class TestMiscCases(TestCase):
{'type': 'strong', 'children': [{'type': 'text', 'raw': 'bar'}]}
]
},
+ {'type': 'blank_line'},
+ {
+ 'type': 'paragraph',
+ 'children': [
+ {'type': 'codespan', 'raw': '&<>"'},
+ ]
+ },
]
self.assertEqual(result, expected)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"mypy"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/lepture/mistune.git@0772c78bc0a62771768278263ef740345f58c0f0#egg=mistune
mypy==1.15.0
mypy-extensions==1.0.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: mistune
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- mistune==3.0.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- pytest-cov==6.0.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mistune
| [
"tests/test_misc.py::TestMiscCases::test_ast_output"
] | [] | [
"tests/test_misc.py::TestMiscCases::test_allow_data_protocols",
"tests/test_misc.py::TestMiscCases::test_allow_harmful_protocols",
"tests/test_misc.py::TestMiscCases::test_before_parse_hooks",
"tests/test_misc.py::TestMiscCases::test_emsp",
"tests/test_misc.py::TestMiscCases::test_escape_html",
"tests/test_misc.py::TestMiscCases::test_hard_wrap",
"tests/test_misc.py::TestMiscCases::test_harmful_links",
"tests/test_misc.py::TestMiscCases::test_html_tag_text_following_list",
"tests/test_misc.py::TestMiscCases::test_markdown_func",
"tests/test_misc.py::TestMiscCases::test_none",
"tests/test_misc.py::TestMiscCases::test_ref_link",
"tests/test_misc.py::TestMiscCases::test_use_plugin"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,127 | 391 | [
"src/mistune/inline_parser.py",
"src/mistune/renderers/html.py"
] |
|
fitbenchmarking__fitbenchmarking-1358 | f88b68edb089a0fa66f94df45fb504490d495839 | 2024-10-31 19:01:33 | 7fcd2778e2b9a7a86ce36c96078e3f6712d5cd12 | coveralls:
[](https://coveralls.io/builds/70646971)
coverage: 90.208% (-0.006%) from 90.214%
when pulling **adb1440a3a1857e791676a2628237c97623a29fe on RabiyaF:1335-fixed-bad-error-msg**
into **cbbfa71754bb236973ac80ffdf357cf1229ca98c on fitbenchmarking:master**.
| diff --git a/fitbenchmarking/utils/options.py b/fitbenchmarking/utils/options.py
index 059a23dc..021fe716 100644
--- a/fitbenchmarking/utils/options.py
+++ b/fitbenchmarking/utils/options.py
@@ -709,17 +709,16 @@ class Options:
value = None
if option in self.VALID[section]:
- if isinstance(value, list):
- set1 = set(value)
- set2 = set(self.VALID[section][option])
- value_check = set1.issubset(set2)
- else:
- value_check = value in self.VALID[section][option]
- if not value_check:
+ value_list = [value] if not isinstance(value, list) else value
+ invalid_values = [
+ v for v in value_list if v not in self.VALID[section][option]
+ ]
+ if invalid_values:
self.error_message.append(
- f"The option '{option}: {value}' in the ini file is "
- f"invalid. {option} must be one or more of "
- f"{self.VALID[section][option]}"
+ f"The option '{option}: {value}' in the ini "
+ f"file is invalid. {invalid_values} is not a "
+ f"valid option. {option} must be one or "
+ f"more of {self.VALID[section][option]}"
)
return value
| Bad options error message
When running a fit with a typo in my options file, I got this:
```
Error while running FitBenchmarking. Exiting. See below for more information.
Failed to process options.
Details: The option 'bumps: ['amoeba', 'de', 'dream', 'lm-bumps', 'newton', 'scipy-leastsp']' in the ini file is invalid. bumps must be one or more of ['amoeba', 'lm-bumps', 'newton', 'de', 'scipy-leastsq', 'dream']
```
This could highlight that the `scipy-seastsp` entry is the issue.
| fitbenchmarking/fitbenchmarking | diff --git a/fitbenchmarking/utils/tests/test_options_minimizers.py b/fitbenchmarking/utils/tests/test_options_minimizers.py
index 09c2c06a..5d7bcc2c 100644
--- a/fitbenchmarking/utils/tests/test_options_minimizers.py
+++ b/fitbenchmarking/utils/tests/test_options_minimizers.py
@@ -339,3 +339,19 @@ class UserMininimizerOptionTests(unittest.TestCase):
opts_file = self.generate_user_ini_file(options_set, software)
with self.assertRaises(exceptions.OptionsError):
Options(opts_file)
+
+ def test_error_msg(self):
+ """
+ Tests the error msg when wrong minimizer is selected.
+ """
+ opts_file = self.generate_user_ini_file(["ameoba", "de"], "bumps")
+ msg = (
+ "Failed to process options.\nDetails: The option 'bumps:"
+ " ['ameoba', 'de']' in the ini file is invalid. ['ameoba']"
+ " is not a valid option. bumps must be one or more of"
+ " ['amoeba', 'lm-bumps', 'newton', 'de', 'scipy-leastsq',"
+ " 'dream']"
+ )
+ with self.assertRaises(exceptions.OptionsError) as exp:
+ Options(opts_file)
+ self.assertEqual(str(exp.exception), msg)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==1.3.0
blinker==1.9.0
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
codecarbon==2.6.0
configparser==7.2.0
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
dash==3.0.1
distlib==0.3.9
docopt==0.6.2
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
-e git+https://github.com/fitbenchmarking/fitbenchmarking.git@f88b68edb089a0fa66f94df45fb504490d495839#egg=FitBenchmarking
Flask==3.0.3
fonttools==4.56.0
identify==2.6.9
idna==3.10
iminuit==2.30.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
itsdangerous==2.2.0
Jinja2==3.1.6
kiwisolver==1.4.7
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdurl==0.1.2
narwhals==1.32.0
nest-asyncio==1.6.0
nodeenv==1.9.1
numpy==2.0.2
nvidia-ml-py==12.570.86
packaging==24.2
pandas==2.2.3
parameterized==0.9.0
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
pre_commit==4.2.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
psutil==7.0.0
py-cpuinfo==9.0.0
Pygments==2.19.1
pynvml==12.0.0
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
questionary==2.1.0
RapidFuzz==3.12.2
requests==2.32.3
retrying==1.3.4
rich==14.0.0
ruff==0.11.2
scipy==1.13.1
shellingham==1.5.4
six==1.17.0
tomli==2.2.1
tqdm==4.67.1
typer==0.15.2
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
wcwidth==0.2.13
Werkzeug==3.0.6
zipp==3.21.0
| name: fitbenchmarking
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- arrow==1.3.0
- blinker==1.9.0
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- codecarbon==2.6.0
- configparser==7.2.0
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- dash==3.0.1
- distlib==0.3.9
- docopt==0.6.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- fitbenchmarking==0.0.0.dev1
- flask==3.0.3
- fonttools==4.56.0
- identify==2.6.9
- idna==3.10
- iminuit==2.30.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- itsdangerous==2.2.0
- jinja2==3.1.6
- kiwisolver==1.4.7
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdurl==0.1.2
- narwhals==1.32.0
- nest-asyncio==1.6.0
- nodeenv==1.9.1
- numpy==2.0.2
- nvidia-ml-py==12.570.86
- packaging==24.2
- pandas==2.2.3
- parameterized==0.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- pre-commit==4.2.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- psutil==7.0.0
- py-cpuinfo==9.0.0
- pygments==2.19.1
- pynvml==12.0.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- questionary==2.1.0
- rapidfuzz==3.12.2
- requests==2.32.3
- retrying==1.3.4
- rich==14.0.0
- ruff==0.11.2
- scipy==1.13.1
- shellingham==1.5.4
- six==1.17.0
- tomli==2.2.1
- tqdm==4.67.1
- typer==0.15.2
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- wcwidth==0.2.13
- werkzeug==3.0.6
- zipp==3.21.0
prefix: /opt/conda/envs/fitbenchmarking
| [
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_error_msg"
] | [] | [
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_00_bumps",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_01_dfo",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_02_gsl",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_03_mantid",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_04_minuit",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_05_ralfit",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_06_scipy",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_07_scipy_ls",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_08_scipy_leastsq",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_09_nlopt",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_10_ceres",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_11_gradient_free",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_12_horace",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_13_levmar",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_14_lmfit",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_15_matlab",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_16_matlab_curve",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_17_matlab_opt",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_18_matlab_stats",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_19_theseus",
"fitbenchmarking/utils/tests/test_options_minimizers.py::MininimizerOptionTests::test_minimizers_for_softwares_in_options_20_scipy_go",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_00",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_01",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_02",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_03",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_04",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_05",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_06",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_07",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_08",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_09",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_10",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_11",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_12",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_13",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_14",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_15",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_16",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_17",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_18",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_19",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_20",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_invalid_21",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_00",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_01",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_02",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_03",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_04",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_05",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_06",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_07",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_08",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_09",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_10",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_11",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_12",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_13",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_14",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_15",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_16",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_17",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_18",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_19",
"fitbenchmarking/utils/tests/test_options_minimizers.py::UserMininimizerOptionTests::test_minimizer_choice_valid_20"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,129 | 334 | [
"fitbenchmarking/utils/options.py"
] |
narwhals-dev__narwhals-1303 | 5c3db5b149cbf64b97de49bc4ad726f019f9f578 | 2024-11-02 10:17:14 | 5c3db5b149cbf64b97de49bc4ad726f019f9f578 | diff --git a/narwhals/_pandas_like/dataframe.py b/narwhals/_pandas_like/dataframe.py
index 7be808af..796386fa 100644
--- a/narwhals/_pandas_like/dataframe.py
+++ b/narwhals/_pandas_like/dataframe.py
@@ -698,21 +698,36 @@ class PandasLikeDataFrame:
# pandas default differs from Polars, but cuDF default is True
copy = self._implementation is Implementation.CUDF
+ to_convert = [
+ key
+ for key, val in self.schema.items()
+ if val == self._dtypes.Datetime and val.time_zone is not None # type: ignore[attr-defined]
+ ]
+ if to_convert:
+ df = self.with_columns(
+ self.__narwhals_namespace__()
+ .col(*to_convert)
+ .dt.convert_time_zone("UTC")
+ .dt.replace_time_zone(None)
+ )._native_frame
+ else:
+ df = self._native_frame
+
if dtype is not None:
- return self._native_frame.to_numpy(dtype=dtype, copy=copy)
+ return df.to_numpy(dtype=dtype, copy=copy)
# pandas return `object` dtype for nullable dtypes if dtype=None,
# so we cast each Series to numpy and let numpy find a common dtype.
# If there aren't any dtypes where `to_numpy()` is "broken" (i.e. it
# returns Object) then we just call `to_numpy()` on the DataFrame.
- for col_dtype in self._native_frame.dtypes:
+ for col_dtype in df.dtypes:
if str(col_dtype) in PANDAS_TO_NUMPY_DTYPE_MISSING:
import numpy as np # ignore-banned-import
return np.hstack(
[self[col].to_numpy(copy=copy)[:, None] for col in self.columns]
)
- return self._native_frame.to_numpy(copy=copy)
+ return df.to_numpy(copy=copy)
def to_pandas(self) -> Any:
if self._implementation is Implementation.PANDAS:
diff --git a/narwhals/_pandas_like/series.py b/narwhals/_pandas_like/series.py
index 35df78e2..078e857b 100644
--- a/narwhals/_pandas_like/series.py
+++ b/narwhals/_pandas_like/series.py
@@ -511,34 +511,30 @@ class PandasLikeSeries:
# the default is meant to be None, but pandas doesn't allow it?
# https://numpy.org/doc/stable/reference/generated/numpy.ndarray.__array__.html
copy = copy or self._implementation is Implementation.CUDF
+ if self.dtype == self._dtypes.Datetime and self.dtype.time_zone is not None: # type: ignore[attr-defined]
+ s = self.dt.convert_time_zone("UTC").dt.replace_time_zone(None)._native_series
+ else:
+ s = self._native_series
- has_missing = self._native_series.isna().any()
- if (
- has_missing
- and str(self._native_series.dtype) in PANDAS_TO_NUMPY_DTYPE_MISSING
- ):
+ has_missing = s.isna().any()
+ if has_missing and str(s.dtype) in PANDAS_TO_NUMPY_DTYPE_MISSING:
if self._implementation is Implementation.PANDAS and self._backend_version < (
1,
): # pragma: no cover
kwargs = {}
else:
kwargs = {"na_value": float("nan")}
- return self._native_series.to_numpy(
- dtype=dtype
- or PANDAS_TO_NUMPY_DTYPE_MISSING[str(self._native_series.dtype)],
+ return s.to_numpy(
+ dtype=dtype or PANDAS_TO_NUMPY_DTYPE_MISSING[str(s.dtype)],
copy=copy,
**kwargs,
)
- if (
- not has_missing
- and str(self._native_series.dtype) in PANDAS_TO_NUMPY_DTYPE_NO_MISSING
- ):
- return self._native_series.to_numpy(
- dtype=dtype
- or PANDAS_TO_NUMPY_DTYPE_NO_MISSING[str(self._native_series.dtype)],
+ if not has_missing and str(s.dtype) in PANDAS_TO_NUMPY_DTYPE_NO_MISSING:
+ return s.to_numpy(
+ dtype=dtype or PANDAS_TO_NUMPY_DTYPE_NO_MISSING[str(s.dtype)],
copy=copy,
)
- return self._native_series.to_numpy(dtype=dtype, copy=copy)
+ return s.to_numpy(dtype=dtype, copy=copy)
def to_pandas(self) -> Any:
if self._implementation is Implementation.PANDAS:
diff --git a/narwhals/_pandas_like/utils.py b/narwhals/_pandas_like/utils.py
index 99181bc1..8074413d 100644
--- a/narwhals/_pandas_like/utils.py
+++ b/narwhals/_pandas_like/utils.py
@@ -30,6 +30,51 @@ PANDAS_LIKE_IMPLEMENTATION = {
Implementation.CUDF,
Implementation.MODIN,
}
+PD_DATETIME_RGX = r"""^
+ datetime64\[
+ (?P<time_unit>s|ms|us|ns) # Match time unit: s, ms, us, or ns
+ (?:, # Begin non-capturing group for optional timezone
+ \s* # Optional whitespace after comma
+ (?P<time_zone> # Start named group for timezone
+ [a-zA-Z\/]+ # Match timezone name, e.g., UTC, America/New_York
+ (?:[+-]\d{2}:\d{2})? # Optional offset in format +HH:MM or -HH:MM
+ | # OR
+ pytz\.FixedOffset\(\d+\) # Match pytz.FixedOffset with integer offset in parentheses
+ ) # End time_zone group
+ )? # End optional timezone group
+ \] # Closing bracket for datetime64
+$"""
+PATTERN_PD_DATETIME = re.compile(PD_DATETIME_RGX, re.VERBOSE)
+PA_DATETIME_RGX = r"""^
+ timestamp\[
+ (?P<time_unit>s|ms|us|ns) # Match time unit: s, ms, us, or ns
+ (?:, # Begin non-capturing group for optional timezone
+ \s?tz= # Match "tz=" prefix
+ (?P<time_zone> # Start named group for timezone
+ [a-zA-Z\/]* # Match timezone name (e.g., UTC, America/New_York)
+ (?: # Begin optional non-capturing group for offset
+ [+-]\d{2}:\d{2} # Match offset in format +HH:MM or -HH:MM
+ )? # End optional offset group
+ ) # End time_zone group
+ )? # End optional timezone group
+ \] # Closing bracket for timestamp
+ \[pyarrow\] # Literal string "[pyarrow]"
+$"""
+PATTERN_PA_DATETIME = re.compile(PA_DATETIME_RGX, re.VERBOSE)
+PD_DURATION_RGX = r"""^
+ timedelta64\[
+ (?P<time_unit>s|ms|us|ns) # Match time unit: s, ms, us, or ns
+ \] # Closing bracket for timedelta64
+$"""
+
+PATTERN_PD_DURATION = re.compile(PD_DURATION_RGX, re.VERBOSE)
+PA_DURATION_RGX = r"""^
+ duration\[
+ (?P<time_unit>s|ms|us|ns) # Match time unit: s, ms, us, or ns
+ \] # Closing bracket for duration
+ \[pyarrow\] # Literal string "[pyarrow]"
+$"""
+PATTERN_PA_DURATION = re.compile(PA_DURATION_RGX, re.VERBOSE)
def validate_column_comparand(index: Any, other: Any) -> Any:
@@ -223,14 +268,6 @@ def native_to_narwhals_dtype(
) -> DType:
dtype = str(native_column.dtype)
- pd_datetime_rgx = (
- r"^datetime64\[(?P<time_unit>s|ms|us|ns)(?:, (?P<time_zone>[a-zA-Z\/]+))?\]$"
- )
- pa_datetime_rgx = r"^timestamp\[(?P<time_unit>s|ms|us|ns)(?:, tz=(?P<time_zone>[a-zA-Z\/]+))?\]\[pyarrow\]$"
-
- pd_duration_rgx = r"^timedelta64\[(?P<time_unit>s|ms|us|ns)\]$"
- pa_duration_rgx = r"^duration\[(?P<time_unit>s|ms|us|ns)\]\[pyarrow\]$"
-
if dtype in {"int64", "Int64", "Int64[pyarrow]", "int64[pyarrow]"}:
return dtypes.Int64()
if dtype in {"int32", "Int32", "Int32[pyarrow]", "int32[pyarrow]"}:
@@ -269,14 +306,14 @@ def native_to_narwhals_dtype(
return dtypes.Boolean()
if dtype == "category" or dtype.startswith("dictionary<"):
return dtypes.Categorical()
- if (match_ := re.match(pd_datetime_rgx, dtype)) or (
- match_ := re.match(pa_datetime_rgx, dtype)
+ if (match_ := PATTERN_PD_DATETIME.match(dtype)) or (
+ match_ := PATTERN_PA_DATETIME.match(dtype)
):
dt_time_unit: Literal["us", "ns", "ms", "s"] = match_.group("time_unit") # type: ignore[assignment]
dt_time_zone: str | None = match_.group("time_zone")
return dtypes.Datetime(dt_time_unit, dt_time_zone)
- if (match_ := re.match(pd_duration_rgx, dtype)) or (
- match_ := re.match(pa_duration_rgx, dtype)
+ if (match_ := PATTERN_PD_DURATION.match(dtype)) or (
+ match_ := PATTERN_PA_DURATION.match(dtype)
):
du_time_unit: Literal["us", "ns", "ms", "s"] = match_.group("time_unit") # type: ignore[assignment]
return dtypes.Duration(du_time_unit)
diff --git a/narwhals/translate.py b/narwhals/translate.py
index 075f9b3a..65cc4422 100644
--- a/narwhals/translate.py
+++ b/narwhals/translate.py
@@ -857,6 +857,13 @@ def to_py_scalar(scalar_like: Any) -> Any:
return scalar_like
np = get_numpy()
+ if (
+ np
+ and isinstance(scalar_like, np.datetime64)
+ and scalar_like.dtype == "datetime64[ns]"
+ ):
+ return datetime(1970, 1, 1) + timedelta(microseconds=scalar_like.item() // 1000)
+
if np and np.isscalar(scalar_like) and hasattr(scalar_like, "item"):
return scalar_like.item()
| bug: fixedoffset time zone results in Unknown Narwhals dtype
```python
In [4]: nw.from_native(pd.Series(pd.to_datetime(['2020-01-01T00:00+01:00'])), series_only=True).dtype
Out[4]: Unknown
```
Expected:
```
Datetime(time_unit='ns', time_zone='+01:00')
``` | narwhals-dev/narwhals | diff --git a/tests/conftest.py b/tests/conftest.py
index d40d1027..922d69e9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,7 +3,6 @@ from __future__ import annotations
import contextlib
from typing import TYPE_CHECKING
from typing import Any
-from typing import Callable
import pandas as pd
import polars as pl
@@ -19,6 +18,7 @@ if TYPE_CHECKING:
from narwhals.typing import IntoDataFrame
from narwhals.typing import IntoFrame
from tests.utils import Constructor
+ from tests.utils import ConstructorEager
with contextlib.suppress(ImportError):
import modin.pandas # noqa: F401
@@ -117,7 +117,7 @@ if get_dask_dataframe() is not None: # pragma: no cover
@pytest.fixture(params=eager_constructors)
def constructor_eager(
request: pytest.FixtureRequest,
-) -> Callable[[Any], IntoDataFrame]:
+) -> ConstructorEager:
return request.param # type: ignore[no-any-return]
diff --git a/tests/dtypes_test.py b/tests/dtypes_test.py
index 0d6363ae..2993521b 100644
--- a/tests/dtypes_test.py
+++ b/tests/dtypes_test.py
@@ -176,3 +176,24 @@ def test_pandas_inplace_modification_1267(request: pytest.FixtureRequest) -> Non
assert snw.dtype == nw.Int64
s[0] = 999.5
assert snw.dtype == nw.Float64
+
+
+def test_pandas_fixed_offset_1302() -> None:
+ result = nw.from_native(
+ pd.Series(pd.to_datetime(["2020-01-01T00:00:00.000000000+01:00"])),
+ series_only=True,
+ ).dtype
+ if PANDAS_VERSION >= (2,):
+ assert result == nw.Datetime("ns", "UTC+01:00")
+ else: # pragma: no cover
+ assert result == nw.Datetime("ns", "pytz.FixedOffset(60)")
+ if PANDAS_VERSION >= (2,):
+ result = nw.from_native(
+ pd.Series(
+ pd.to_datetime(["2020-01-01T00:00:00.000000000+01:00"])
+ ).convert_dtypes(dtype_backend="pyarrow"),
+ series_only=True,
+ ).dtype
+ assert result == nw.Datetime("ns", "+01:00")
+ else: # pragma: no cover
+ pass
diff --git a/tests/frame/to_numpy_test.py b/tests/frame/to_numpy_test.py
index aa3dfc2e..0b631a3d 100644
--- a/tests/frame/to_numpy_test.py
+++ b/tests/frame/to_numpy_test.py
@@ -1,10 +1,15 @@
from __future__ import annotations
+from datetime import datetime
from typing import TYPE_CHECKING
import numpy as np
+import pytest
import narwhals.stable.v1 as nw
+from tests.utils import PANDAS_VERSION
+from tests.utils import PYARROW_VERSION
+from tests.utils import is_windows
if TYPE_CHECKING:
from tests.utils import ConstructorEager
@@ -18,3 +23,32 @@ def test_to_numpy(constructor_eager: ConstructorEager) -> None:
expected = np.array([[1, 3, 2], [4, 4, 6], [7.1, 8, 9]]).T
np.testing.assert_array_equal(result, expected)
assert result.dtype == "float64"
+
+
+def test_to_numpy_tz_aware(
+ constructor_eager: ConstructorEager, request: pytest.FixtureRequest
+) -> None:
+ if (
+ ("pyarrow_table" in str(constructor_eager) and PYARROW_VERSION < (12,))
+ or ("pandas_pyarrow" in str(constructor_eager) and PANDAS_VERSION < (2, 2))
+ or (
+ any(x in str(constructor_eager) for x in ("pyarrow", "modin"))
+ and is_windows()
+ )
+ ):
+ request.applymarker(pytest.mark.xfail)
+ df = nw.from_native(
+ constructor_eager({"a": [datetime(2020, 1, 1), datetime(2020, 1, 2)]}),
+ eager_only=True,
+ )
+ df = df.select(nw.col("a").dt.replace_time_zone("Asia/Kathmandu"))
+ result = df.to_numpy()
+ # for some reason, NumPy uses 'M' for datetimes
+ assert result.dtype.kind == "M"
+ assert (
+ result
+ == np.array(
+ [["2019-12-31T18:15:00.000000"], ["2020-01-01T18:15:00.000000"]],
+ dtype=result.dtype,
+ )
+ ).all()
diff --git a/tests/series_only/to_numpy_test.py b/tests/series_only/to_numpy_test.py
index 966a4444..8e36ac12 100644
--- a/tests/series_only/to_numpy_test.py
+++ b/tests/series_only/to_numpy_test.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from datetime import datetime
from typing import TYPE_CHECKING
import numpy as np
@@ -7,6 +8,9 @@ import pytest
from numpy.testing import assert_array_equal
import narwhals.stable.v1 as nw
+from tests.utils import PANDAS_VERSION
+from tests.utils import PYARROW_VERSION
+from tests.utils import is_windows
if TYPE_CHECKING:
from tests.utils import ConstructorEager
@@ -30,3 +34,33 @@ def test_to_numpy(
assert s.shape == (3,)
assert_array_equal(s.to_numpy(), np.array(data, dtype=float))
+
+
+def test_to_numpy_tz_aware(
+ constructor_eager: ConstructorEager, request: pytest.FixtureRequest
+) -> None:
+ if (
+ ("pyarrow_table" in str(constructor_eager) and PYARROW_VERSION < (12,))
+ or ("pandas_pyarrow" in str(constructor_eager) and PANDAS_VERSION < (2, 2))
+ or (
+ any(x in str(constructor_eager) for x in ("pyarrow", "modin"))
+ and is_windows()
+ )
+ ):
+ request.applymarker(pytest.mark.xfail)
+ request.applymarker(pytest.mark.xfail)
+ df = nw.from_native(
+ constructor_eager({"a": [datetime(2020, 1, 1), datetime(2020, 1, 2)]}),
+ eager_only=True,
+ )
+ df = df.select(nw.col("a").dt.replace_time_zone("Asia/Kathmandu"))
+ result = df["a"].to_numpy()
+ # for some reason, NumPy uses 'M' for datetimes
+ assert result.dtype.kind == "M"
+ assert (
+ result
+ == np.array(
+ ["2019-12-31T18:15:00.000000", "2020-01-01T18:15:00.000000"],
+ dtype=result.dtype,
+ )
+ ).all()
diff --git a/tests/translate/to_py_scalar_test.py b/tests/translate/to_py_scalar_test.py
index 3519b5e8..b13ba009 100644
--- a/tests/translate/to_py_scalar_test.py
+++ b/tests/translate/to_py_scalar_test.py
@@ -2,7 +2,6 @@ from __future__ import annotations
from datetime import datetime
from datetime import timedelta
-from typing import TYPE_CHECKING
from typing import Any
import numpy as np
@@ -11,9 +10,7 @@ import pytest
import narwhals.stable.v1 as nw
from narwhals.dependencies import get_cudf
-
-if TYPE_CHECKING:
- from tests.utils import ConstructorEager
+from tests.utils import PANDAS_VERSION
@pytest.mark.parametrize(
@@ -28,31 +25,31 @@ if TYPE_CHECKING:
(b"a", b"a"),
(datetime(2021, 1, 1), datetime(2021, 1, 1)),
(timedelta(days=1), timedelta(days=1)),
+ (pd.Timestamp("2020-01-01"), datetime(2020, 1, 1)),
+ (pd.Timedelta(days=3), timedelta(days=3)),
+ (np.datetime64("2020-01-01", "s"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "ms"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "us"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "ns"), datetime(2020, 1, 1)),
],
)
def test_to_py_scalar(
- constructor_eager: ConstructorEager,
input_value: Any,
expected: Any,
- request: pytest.FixtureRequest,
) -> None:
- if isinstance(input_value, bytes) and "cudf" in str(constructor_eager):
- request.applymarker(pytest.mark.xfail)
- df = nw.from_native(constructor_eager({"a": [input_value]}))
- output = nw.to_py_scalar(df["a"].item(0))
- if expected == 1 and constructor_eager.__name__.startswith("pandas"):
+ output = nw.to_py_scalar(input_value)
+ if expected == 1:
assert not isinstance(output, np.int64)
- elif isinstance(expected, datetime) and constructor_eager.__name__.startswith(
- "pandas"
- ):
- assert not isinstance(output, pd.Timestamp)
- elif isinstance(expected, timedelta) and constructor_eager.__name__.startswith(
- "pandas"
- ):
- assert not isinstance(output, pd.Timedelta)
assert output == expected
[email protected](
+ PANDAS_VERSION < (1,), reason="there was a (better?) time when there was no pd.NA"
+)
+def test_na_to_py_scalar() -> None:
+ assert nw.to_py_scalar(pd.NA) is None
+
+
@pytest.mark.parametrize(
"input_value",
[np.array([1, 2]), [1, 2, 3], {"a": [1, 2, 3]}],
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | 1.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@5c3db5b149cbf64b97de49bc4ad726f019f9f578#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
pyarrow==19.0.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.12.1
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- pyarrow==19.0.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/series_only/to_numpy_test.py::test_to_numpy_tz_aware[pandas_nullable_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy_tz_aware[pandas_pyarrow_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy_tz_aware[pandas_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy_tz_aware[pandas_pyarrow_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy_tz_aware[pandas_nullable_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy_tz_aware[pandas_constructor]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value14-expected14]",
"tests/dtypes_test.py::test_pandas_fixed_offset_1302"
] | [] | [
"tests/series_only/to_numpy_test.py::test_to_numpy[polars_eager_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy_tz_aware[pyarrow_table_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy[pandas_nullable_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy[pandas_pyarrow_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy_tz_aware[polars_eager_constructor]",
"tests/series_only/to_numpy_test.py::test_to_numpy[pyarrow_table_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy[pandas_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy[pandas_nullable_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy_tz_aware[polars_eager_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy[polars_eager_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy_tz_aware[pyarrow_table_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy[pandas_pyarrow_constructor]",
"tests/frame/to_numpy_test.py::test_to_numpy[pyarrow_table_constructor]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value9-expected9]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value13-expected13]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value2]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value8-expected8]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[True-True]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value12-expected12]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error_cudf",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[1.0-1.0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value10-expected10]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[1-1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[None-None]",
"tests/translate/to_py_scalar_test.py::test_na_to_py_scalar",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[a-a1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[a-a0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value7-expected7]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value11-expected11]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value1-1]",
"tests/dtypes_test.py::test_duration_valid[ns]",
"tests/dtypes_test.py::test_datetime_valid[Europe/Rome-us]",
"tests/dtypes_test.py::test_datetime_valid[time_zone1-ms]",
"tests/dtypes_test.py::test_struct_hashes",
"tests/dtypes_test.py::test_datetime_valid[Europe/Rome-ms]",
"tests/dtypes_test.py::test_pandas_inplace_modification_1267",
"tests/dtypes_test.py::test_struct_reverse",
"tests/dtypes_test.py::test_duration_valid[us]",
"tests/dtypes_test.py::test_duration_invalid[abc]",
"tests/dtypes_test.py::test_datetime_valid[None-ns]",
"tests/dtypes_test.py::test_datetime_valid[time_zone1-us]",
"tests/dtypes_test.py::test_datetime_valid[None-us]",
"tests/dtypes_test.py::test_datetime_valid[time_zone1-ns]",
"tests/dtypes_test.py::test_datetime_valid[None-ms]",
"tests/dtypes_test.py::test_polars_2d_array",
"tests/dtypes_test.py::test_struct_valid",
"tests/dtypes_test.py::test_second_time_unit",
"tests/dtypes_test.py::test_array_valid",
"tests/dtypes_test.py::test_duration_valid[ms]",
"tests/dtypes_test.py::test_field_repr",
"tests/dtypes_test.py::test_list_valid",
"tests/dtypes_test.py::test_datetime_invalid[abc]",
"tests/dtypes_test.py::test_datetime_valid[Europe/Rome-ns]"
] | [] | MIT License | 20,134 | 2,636 | [
"narwhals/_pandas_like/dataframe.py",
"narwhals/_pandas_like/series.py",
"narwhals/_pandas_like/utils.py",
"narwhals/translate.py"
] |
|
narwhals-dev__narwhals-1304 | 5c3db5b149cbf64b97de49bc4ad726f019f9f578 | 2024-11-02 10:26:19 | 5c3db5b149cbf64b97de49bc4ad726f019f9f578 | diff --git a/narwhals/translate.py b/narwhals/translate.py
index 075f9b3a..65cc4422 100644
--- a/narwhals/translate.py
+++ b/narwhals/translate.py
@@ -857,6 +857,13 @@ def to_py_scalar(scalar_like: Any) -> Any:
return scalar_like
np = get_numpy()
+ if (
+ np
+ and isinstance(scalar_like, np.datetime64)
+ and scalar_like.dtype == "datetime64[ns]"
+ ):
+ return datetime(1970, 1, 1) + timedelta(microseconds=scalar_like.item() // 1000)
+
if np and np.isscalar(scalar_like) and hasattr(scalar_like, "item"):
return scalar_like.item()
| bug: `to_py_scalar` converts `np.datetime64` to `int`
```python
In [8]: nw.to_py_scalar(np.datetime64('2000-01-01', 'ns'))
Out[8]: 946684800000000000
In [9]: nw.to_py_scalar(np.datetime64('2000-01-01', 'us'))
Out[9]: datetime.datetime(2000, 1, 1, 0, 0)
```
I think both should get converted to `datetime.datetime(2000, 1, 1, 0, 0)` | narwhals-dev/narwhals | diff --git a/tests/translate/to_py_scalar_test.py b/tests/translate/to_py_scalar_test.py
index 3519b5e8..b13ba009 100644
--- a/tests/translate/to_py_scalar_test.py
+++ b/tests/translate/to_py_scalar_test.py
@@ -2,7 +2,6 @@ from __future__ import annotations
from datetime import datetime
from datetime import timedelta
-from typing import TYPE_CHECKING
from typing import Any
import numpy as np
@@ -11,9 +10,7 @@ import pytest
import narwhals.stable.v1 as nw
from narwhals.dependencies import get_cudf
-
-if TYPE_CHECKING:
- from tests.utils import ConstructorEager
+from tests.utils import PANDAS_VERSION
@pytest.mark.parametrize(
@@ -28,31 +25,31 @@ if TYPE_CHECKING:
(b"a", b"a"),
(datetime(2021, 1, 1), datetime(2021, 1, 1)),
(timedelta(days=1), timedelta(days=1)),
+ (pd.Timestamp("2020-01-01"), datetime(2020, 1, 1)),
+ (pd.Timedelta(days=3), timedelta(days=3)),
+ (np.datetime64("2020-01-01", "s"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "ms"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "us"), datetime(2020, 1, 1)),
+ (np.datetime64("2020-01-01", "ns"), datetime(2020, 1, 1)),
],
)
def test_to_py_scalar(
- constructor_eager: ConstructorEager,
input_value: Any,
expected: Any,
- request: pytest.FixtureRequest,
) -> None:
- if isinstance(input_value, bytes) and "cudf" in str(constructor_eager):
- request.applymarker(pytest.mark.xfail)
- df = nw.from_native(constructor_eager({"a": [input_value]}))
- output = nw.to_py_scalar(df["a"].item(0))
- if expected == 1 and constructor_eager.__name__.startswith("pandas"):
+ output = nw.to_py_scalar(input_value)
+ if expected == 1:
assert not isinstance(output, np.int64)
- elif isinstance(expected, datetime) and constructor_eager.__name__.startswith(
- "pandas"
- ):
- assert not isinstance(output, pd.Timestamp)
- elif isinstance(expected, timedelta) and constructor_eager.__name__.startswith(
- "pandas"
- ):
- assert not isinstance(output, pd.Timedelta)
assert output == expected
[email protected](
+ PANDAS_VERSION < (1,), reason="there was a (better?) time when there was no pd.NA"
+)
+def test_na_to_py_scalar() -> None:
+ assert nw.to_py_scalar(pd.NA) is None
+
+
@pytest.mark.parametrize(
"input_value",
[np.array([1, 2]), [1, 2, 3], {"a": [1, 2, 3]}],
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@5c3db5b149cbf64b97de49bc4ad726f019f9f578#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
pyarrow==19.0.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.12.1
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- pyarrow==19.0.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value14-expected14]"
] | [] | [
"tests/translate/to_py_scalar_test.py::test_na_to_py_scalar",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error_cudf",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[None-None]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[a-a1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value1-1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value9-expected9]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[True-True]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value12-expected12]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value10-expected10]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value8-expected8]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[a-a0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[1-1]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar_value_error[input_value2]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[1.0-1.0]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value7-expected7]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value11-expected11]",
"tests/translate/to_py_scalar_test.py::test_to_py_scalar[input_value13-expected13]"
] | [] | MIT License | 20,135 | 205 | [
"narwhals/translate.py"
] |
|
unionai-oss__pandera-1844 | 9667234b4f6c273e1c3e9deac1ef982e3a58ff27 | 2024-11-02 20:32:51 | 6bf6bc071ef40826255ed6f2ff95faeebbf7c11b | codecov[bot]: ## [Codecov](https://app.codecov.io/gh/unionai-oss/pandera/pull/1844?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss) Report
Attention: Patch coverage is `30.61224%` with `34 lines` in your changes missing coverage. Please review.
> Project coverage is 59.05%. Comparing base [(`812b2a8`)](https://app.codecov.io/gh/unionai-oss/pandera/commit/812b2a8af8c123c46caa348837da04e8c6573260?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss) to head [(`9b5e30f`)](https://app.codecov.io/gh/unionai-oss/pandera/commit/9b5e30fdadaea6c196d67daae9fb551704823c9e?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss).
> Report is 154 commits behind head on main.
| [Files with missing lines](https://app.codecov.io/gh/unionai-oss/pandera/pull/1844?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss) | Patch % | Lines |
|---|---|---|
| [pandera/io/pandas\_io.py](https://app.codecov.io/gh/unionai-oss/pandera/pull/1844?src=pr&el=tree&filepath=pandera%2Fio%2Fpandas_io.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss#diff-cGFuZGVyYS9pby9wYW5kYXNfaW8ucHk=) | 0.00% | [34 Missing :warning: ](https://app.codecov.io/gh/unionai-oss/pandera/pull/1844?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss) |
> :exclamation: There is a different number of reports uploaded between BASE (812b2a8) and HEAD (9b5e30f). Click for more details.
>
> <details><summary>HEAD has 47 uploads less than BASE</summary>
>
>| Flag | BASE (812b2a8) | HEAD (9b5e30f) |
>|------|------|------|
>||48|1|
></details>
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #1844 +/- ##
===========================================
- Coverage 94.28% 59.05% -35.23%
===========================================
Files 91 120 +29
Lines 7013 9229 +2216
===========================================
- Hits 6612 5450 -1162
- Misses 401 3779 +3378
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/unionai-oss/pandera/pull/1844?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=unionai-oss).
cosmicBboy: @alexismanuel looks like there are some failing unit tests | diff --git a/pandera/engines/pandas_engine.py b/pandera/engines/pandas_engine.py
index 41ab94e..6f6f923 100644
--- a/pandera/engines/pandas_engine.py
+++ b/pandera/engines/pandas_engine.py
@@ -1661,7 +1661,7 @@ if PYARROW_INSTALLED and PANDAS_2_0_0_PLUS:
equivalents=[pyarrow.dictionary, pyarrow.DictionaryType]
)
@immutable(init=True)
- class ArrowDictionary(ArrowDataType, dtypes.Category):
+ class ArrowDictionary(ArrowDataType):
"""Semantic representation of a :class:`pyarrow.dictionary`."""
type: Optional[pd.ArrowDtype] = dataclasses.field(
diff --git a/pandera/engines/pyarrow_engine.py b/pandera/engines/pyarrow_engine.py
index 5ddb8b7..695550d 100644
--- a/pandera/engines/pyarrow_engine.py
+++ b/pandera/engines/pyarrow_engine.py
@@ -268,7 +268,7 @@ class ArrowTimestamp(ArrowDataType, dtypes.Timestamp):
equivalents=[pyarrow.dictionary, pyarrow.DictionaryType]
)
@immutable(init=True)
-class ArrowDictionary(ArrowDataType, dtypes.Category):
+class ArrowDictionary(ArrowDataType):
"""Semantic representation of a :class:`pyarrow.dictionary`."""
type: Optional[pd.ArrowDtype] = dataclasses.field(default=None, init=False)
diff --git a/pandera/io/pandas_io.py b/pandera/io/pandas_io.py
index 004a024..fe17b5f 100644
--- a/pandera/io/pandas_io.py
+++ b/pandera/io/pandas_io.py
@@ -68,15 +68,28 @@ def _serialize_check_stats(check_stats, dtype=None):
return stat
- # for unary checks, return a single value instead of a dictionary
- if len(check_stats) == 1:
- return handle_stat_dtype(list(check_stats.values())[0])
+ # Extract check options if they exist
+ check_options = (
+ check_stats.pop("options", {}) if isinstance(check_stats, dict) else {}
+ )
+
+ # Handle unary checks
+ if isinstance(check_stats, dict) and len(check_stats) == 1:
+ value = handle_stat_dtype(list(check_stats.values())[0])
+ if check_options:
+ return {"value": value, "options": check_options}
+ return value
- # otherwise return a dictionary of keyword args needed to create the Check
- serialized_check_stats = {}
- for arg, stat in check_stats.items():
- serialized_check_stats[arg] = handle_stat_dtype(stat)
- return serialized_check_stats
+ # Handle dictionary case
+ if isinstance(check_stats, dict):
+ serialized_check_stats = {}
+ for arg, stat in check_stats.items():
+ serialized_check_stats[arg] = handle_stat_dtype(stat)
+ if check_options:
+ serialized_check_stats["options"] = check_options
+ return serialized_check_stats
+
+ return handle_stat_dtype(check_stats)
def _serialize_dataframe_stats(dataframe_checks):
@@ -178,6 +191,8 @@ def serialize_schema(dataframe_schema):
def _deserialize_check_stats(check, serialized_check_stats, dtype=None):
+ """Deserialize check statistics and reconstruct check with options."""
+
def handle_stat_dtype(stat):
try:
if pandas_engine.Engine.dtype(dtypes.DateTime).check(dtype):
@@ -189,15 +204,35 @@ def _deserialize_check_stats(check, serialized_check_stats, dtype=None):
return stat
return stat
+ # Extract options if they exist
+ options = {}
if isinstance(serialized_check_stats, dict):
# handle case where serialized check stats are in the form of a
# dictionary mapping Check arg names to values.
+ options = serialized_check_stats.pop("options", {})
+ # Handle special case for unary checks with options
+ if (
+ "value" in serialized_check_stats
+ and len(serialized_check_stats) == 1
+ ):
+ serialized_check_stats = serialized_check_stats["value"]
+
+ # Create check with original logic
+ if isinstance(serialized_check_stats, dict):
check_stats = {}
for arg, stat in serialized_check_stats.items():
check_stats[arg] = handle_stat_dtype(stat)
- return check(**check_stats)
- # otherwise assume unary check function signature
- return check(handle_stat_dtype(serialized_check_stats))
+ check_instance = check(**check_stats)
+ else:
+ # otherwise assume unary check function signature
+ check_instance = check(handle_stat_dtype(serialized_check_stats))
+
+ # Apply options if they exist
+ if options:
+ for option_name, option_value in options.items():
+ setattr(check_instance, option_name, option_value)
+
+ return check_instance
def _deserialize_component_stats(serialized_component_stats):
@@ -447,6 +482,7 @@ MultiIndex(indexes=[{indexes}])
def _format_checks(checks_dict):
+ """Format checks into string representation including options."""
if checks_dict is None:
return "None"
@@ -457,11 +493,33 @@ def _format_checks(checks_dict):
f"Check {check_name} cannot be serialized. "
"This check will be ignored"
)
- else:
+ continue
+
+ # Handle options separately
+ options = (
+ check_kwargs.pop("options", {})
+ if isinstance(check_kwargs, dict)
+ else {}
+ )
+
+ # Format main check arguments
+ if isinstance(check_kwargs, dict):
args = ", ".join(
f"{k}={v.__repr__()}" for k, v in check_kwargs.items()
)
- checks.append(f"Check.{check_name}({args})")
+ else:
+ args = check_kwargs.__repr__()
+
+ # Add options to arguments if they exist
+ if options:
+ if args:
+ args += ", "
+ args += ", ".join(
+ f"{k}={v.__repr__()}" for k, v in options.items()
+ )
+
+ checks.append(f"Check.{check_name}({args})")
+
return f"[{', '.join(checks)}]"
diff --git a/pandera/schema_statistics/pandas.py b/pandera/schema_statistics/pandas.py
index 00d51cf..b2e55f1 100644
--- a/pandera/schema_statistics/pandas.py
+++ b/pandera/schema_statistics/pandas.py
@@ -68,14 +68,29 @@ def infer_index_statistics(index: Union[pd.Index, pd.MultiIndex]):
def parse_check_statistics(check_stats: Union[Dict[str, Any], None]):
- """Convert check statistics to a list of Check objects."""
+ """Convert check statistics to a list of Check objects, including their options."""
if check_stats is None:
return None
checks = []
for check_name, stats in check_stats.items():
check = getattr(Check, check_name)
try:
- checks.append(check(**stats))
+ # Extract options if present
+ if isinstance(stats, dict):
+ options = (
+ stats.pop("options", {}) if "options" in stats else {}
+ )
+ if stats: # If there are remaining stats
+ check_instance = check(**stats)
+ else: # Handle case where all stats were in options
+ check_instance = check()
+ # Apply options to the check instance
+ for option_name, option_value in options.items():
+ setattr(check_instance, option_name, option_value)
+ checks.append(check_instance)
+ else:
+ # Handle unary check case
+ checks.append(check(stats))
except TypeError:
# if stats cannot be unpacked as key-word args, assume unary check.
checks.append(check(stats))
@@ -142,9 +157,10 @@ def get_series_schema_statistics(series_schema):
def parse_checks(checks) -> Union[Dict[str, Any], None]:
- """Convert Check object to check statistics."""
+ """Convert Check object to check statistics including options."""
check_statistics = {}
_check_memo = {}
+
for check in checks:
if check not in Check:
warnings.warn(
@@ -154,28 +170,46 @@ def parse_checks(checks) -> Union[Dict[str, Any], None]:
)
continue
- check_statistics[check.name] = (
- {} if check.statistics is None else check.statistics
- )
+ # Get base statistics
+ base_stats = {} if check.statistics is None else check.statistics
+
+ # Collect check options
+ check_options = {
+ "raise_warning": check.raise_warning,
+ "n_failure_cases": check.n_failure_cases,
+ "ignore_na": check.ignore_na,
+ }
+
+ # Filter out None values from options
+ check_options = {
+ k: v for k, v in check_options.items() if v is not None
+ }
+
+ # Combine statistics with options
+ check_statistics[check.name] = base_stats
+ if check_options:
+ check_statistics[check.name]["options"] = check_options
+
_check_memo[check.name] = check
- # raise ValueError on incompatible checks
+ # Check for incompatible checks
if (
"greater_than_or_equal_to" in check_statistics
and "less_than_or_equal_to" in check_statistics
):
min_value = check_statistics.get(
"greater_than_or_equal_to", float("-inf")
- )["min_value"]
+ ).get("min_value", float("-inf"))
max_value = check_statistics.get(
"less_than_or_equal_to", float("inf")
- )["max_value"]
+ ).get("max_value", float("inf"))
if min_value > max_value:
raise ValueError(
f"checks {_check_memo['greater_than_or_equal_to']} "
f"and {_check_memo['less_than_or_equal_to']} are incompatible, reason: "
f"min value {min_value} > max value {max_value}"
)
+
return check_statistics if check_statistics else None
| raise_warning option in a Check is not kept when saving the Schema to yaml or script.
As in the title, if you create a Schema and set 'raise_warning = True' in a check, then this behaviour will not persist if you then save the Schema to a yaml file or even as a Python script. This is on pandera v0.8.0.
#### Example:
```python
df = pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]}
)
schema = pa.schema_inference.infer_schema(df)
schema = schema.update_column('a', checks=[pa.Check.isin([2, 3, 4], raise_warning = True)])
schema.validate(df)
```
This returns:
```
UserWarning: <Schema Column(name=a, type=DataType(int64))> failed element-wise validator 0:
<Check isin: isin({2, 3, 4})>
failure cases:
index failure_case
0 0 1
warnings.warn(error_msg, UserWarning)
```
However when I save to yaml and re-create the Schema:
```python
schema.to_yaml("./example_schema.yml")
new_schema = pa.DataFrameSchema.from_yaml("./example_schema.yml")
new_schema.validate(df)
```
This returns (after all the traceback):
```
SchemaError: <Schema Column(name=a, type=int64)> failed element-wise validator 0:
<Check isin: isin({2, 3, 4})>
failure cases:
index failure_case
0 0 1
```
#### Expected behavior
I would expect the error/warning behaviour for Checks to persist if saving a Schema to file.
| unionai-oss/pandera | diff --git a/tests/core/test_pandas_engine.py b/tests/core/test_pandas_engine.py
index bd4187c..f79869e 100644
--- a/tests/core/test_pandas_engine.py
+++ b/tests/core/test_pandas_engine.py
@@ -92,12 +92,10 @@ def test_pandas_data_type_check(data_type_cls):
return
check_result = data_type.check(
- pandas_engine.Engine.dtype(data_container.dtype),
- data_container,
+ pandas_engine.Engine.dtype(data_container.dtype), data_container
)
assert isinstance(check_result, bool) or isinstance(
- check_result.all(),
- (bool, np.bool_),
+ check_result.all(), (bool, np.bool_)
)
@@ -210,7 +208,7 @@ def test_pandas_datetimetz_dtype(timezone_aware, data, timezone):
data=pd_st.series(
dtype="datetime64[ns]",
index=pd_st.range_indexes(min_size=5, max_size=10),
- ),
+ )
)
def test_pandas_date_coerce_dtype(to_df, data):
"""Test that pandas Date dtype coerces to datetime.date object."""
@@ -241,22 +239,11 @@ def test_pandas_date_coerce_dtype(to_df, data):
pandas_arrow_dtype_cases = (
- (
- pd.Series([["a", "b", "c"]]),
- pyarrow.list_(pyarrow.string()),
- ),
- (
- pd.Series([["a", "b"]]),
- pyarrow.list_(pyarrow.string(), 2),
- ),
+ (pd.Series([["a", "b", "c"]]), pyarrow.list_(pyarrow.string())),
+ (pd.Series([["a", "b"]]), pyarrow.list_(pyarrow.string(), 2)),
(
pd.Series([{"foo": 1, "bar": "a"}]),
- pyarrow.struct(
- [
- ("foo", pyarrow.int64()),
- ("bar", pyarrow.string()),
- ]
- ),
+ pyarrow.struct([("foo", pyarrow.int64()), ("bar", pyarrow.string())]),
),
(pd.Series([None, pd.NA, np.nan]), pyarrow.null),
(pd.Series([None, date(1970, 1, 1)]), pyarrow.date32),
@@ -277,6 +264,10 @@ pandas_arrow_dtype_cases = (
(pd.Series(["foo", "bar", "baz", None]), pyarrow.binary(3)),
(pd.Series(["foo", "barbaz", None]), pyarrow.large_binary()),
(pd.Series(["1", "1.0", "foo", "bar", None]), pyarrow.large_string()),
+ (
+ pd.Series(["a", "b", "c"]),
+ pyarrow.dictionary(pyarrow.int64(), pyarrow.string()),
+ ),
)
@@ -289,26 +280,16 @@ def test_pandas_arrow_dtype(data, dtype):
pytest.skip("Support of pandas 2.0.0+ with pyarrow only")
dtype = pandas_engine.Engine.dtype(dtype)
- dtype.coerce(data)
+ coerced_data = dtype.coerce(data)
+ assert coerced_data.dtype == dtype.type
pandas_arrow_dtype_error_cases = (
- (
- pd.Series([["a", "b", "c"]]),
- pyarrow.list_(pyarrow.int64()),
- ),
- (
- pd.Series([["a", "b"]]),
- pyarrow.list_(pyarrow.string(), 3),
- ),
+ (pd.Series([["a", "b", "c"]]), pyarrow.list_(pyarrow.int64())),
+ (pd.Series([["a", "b"]]), pyarrow.list_(pyarrow.string(), 3)),
(
pd.Series([{"foo": 1, "bar": "a"}]),
- pyarrow.struct(
- [
- ("foo", pyarrow.string()),
- ("bar", pyarrow.int64()),
- ]
- ),
+ pyarrow.struct([("foo", pyarrow.string()), ("bar", pyarrow.int64())]),
),
(pd.Series(["a", "1"]), pyarrow.null),
(pd.Series(["a", date(1970, 1, 1), "1970-01-01"]), pyarrow.date32),
@@ -329,6 +310,14 @@ pandas_arrow_dtype_error_cases = (
(pd.Series(["foo", "bar", "baz", None]), pyarrow.binary(2)),
(pd.Series([1, "foo", "barbaz", None]), pyarrow.large_binary()),
(pd.Series([1, 1.0, "foo", "bar", None]), pyarrow.large_string()),
+ (
+ pd.Series([1.0, 2.0, 3.0]),
+ pyarrow.dictionary(pyarrow.int64(), pyarrow.float64()),
+ ),
+ (
+ pd.Series(["a", "b", "c"]),
+ pyarrow.dictionary(pyarrow.int64(), pyarrow.int64()),
+ ),
)
@@ -347,6 +336,8 @@ def test_pandas_arrow_dtype_error(data, dtype):
pyarrow.ArrowTypeError,
NotImplementedError,
ValueError,
+ AssertionError,
)
):
- dtype.coerce(data)
+ coerced_data = dtype.coerce(data)
+ assert coerced_data.dtype == dtype.type
diff --git a/tests/core/test_schema_statistics.py b/tests/core/test_schema_statistics.py
index 010c675..78cecfa 100644
--- a/tests/core/test_schema_statistics.py
+++ b/tests/core/test_schema_statistics.py
@@ -467,8 +467,14 @@ def test_get_dataframe_schema_statistics():
"int": {
"dtype": DEFAULT_INT,
"checks": {
- "greater_than_or_equal_to": {"min_value": 0},
- "less_than_or_equal_to": {"max_value": 100},
+ "greater_than_or_equal_to": {
+ "min_value": 0,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
+ "less_than_or_equal_to": {
+ "max_value": 100,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
},
"nullable": True,
"unique": False,
@@ -481,8 +487,14 @@ def test_get_dataframe_schema_statistics():
"float": {
"dtype": DEFAULT_FLOAT,
"checks": {
- "greater_than_or_equal_to": {"min_value": 50},
- "less_than_or_equal_to": {"max_value": 100},
+ "greater_than_or_equal_to": {
+ "min_value": 50,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
+ "less_than_or_equal_to": {
+ "max_value": 100,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
},
"nullable": False,
"unique": False,
@@ -494,7 +506,12 @@ def test_get_dataframe_schema_statistics():
},
"str": {
"dtype": pandas_engine.Engine.dtype(str),
- "checks": {"isin": {"allowed_values": ["foo", "bar", "baz"]}},
+ "checks": {
+ "isin": {
+ "allowed_values": ["foo", "bar", "baz"],
+ "options": {"ignore_na": True, "raise_warning": False},
+ }
+ },
"nullable": False,
"unique": False,
"coerce": False,
@@ -507,7 +524,12 @@ def test_get_dataframe_schema_statistics():
"index": [
{
"dtype": DEFAULT_INT,
- "checks": {"greater_than_or_equal_to": {"min_value": 0}},
+ "checks": {
+ "greater_than_or_equal_to": {
+ "min_value": 0,
+ "options": {"ignore_na": True, "raise_warning": False},
+ }
+ },
"nullable": False,
"coerce": False,
"name": "int_index",
@@ -537,8 +559,14 @@ def test_get_series_schema_statistics():
"dtype": pandas_engine.Engine.dtype(int),
"nullable": False,
"checks": {
- "greater_than_or_equal_to": {"min_value": 0},
- "less_than_or_equal_to": {"max_value": 100},
+ "greater_than_or_equal_to": {
+ "min_value": 0,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
+ "less_than_or_equal_to": {
+ "max_value": 100,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
},
"name": None,
"coerce": False,
@@ -566,8 +594,20 @@ def test_get_series_schema_statistics():
"dtype": pandas_engine.Engine.dtype(int),
"nullable": False,
"checks": {
- "greater_than_or_equal_to": {"min_value": 10},
- "less_than_or_equal_to": {"max_value": 20},
+ "greater_than_or_equal_to": {
+ "min_value": 10,
+ "options": {
+ "ignore_na": True,
+ "raise_warning": False,
+ },
+ },
+ "less_than_or_equal_to": {
+ "max_value": 20,
+ "options": {
+ "ignore_na": True,
+ "raise_warning": False,
+ },
+ },
},
"name": "int_index",
"coerce": False,
@@ -591,7 +631,15 @@ def test_get_index_schema_statistics(index_schema_component, expectation):
"checks, expectation",
[
*[
- [[check], {check.name: check.statistics}]
+ [
+ [check],
+ {
+ check.name: {
+ **(check.statistics or {}),
+ "options": {"ignore_na": True, "raise_warning": False},
+ }
+ },
+ ]
for check in [
pa.Check.greater_than(1),
pa.Check.less_than(1),
@@ -614,9 +662,18 @@ def test_get_index_schema_statistics(index_schema_component, expectation):
pa.Check.isin([10, 20, 30, 40, 50]),
],
{
- "greater_than_or_equal_to": {"min_value": 10},
- "less_than_or_equal_to": {"max_value": 50},
- "isin": {"allowed_values": [10, 20, 30, 40, 50]},
+ "greater_than_or_equal_to": {
+ "min_value": 10,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
+ "less_than_or_equal_to": {
+ "max_value": 50,
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
+ "isin": {
+ "allowed_values": [10, 20, 30, 40, 50],
+ "options": {"ignore_na": True, "raise_warning": False},
+ },
},
],
# incompatible checks
@@ -650,7 +707,13 @@ def test_parse_checks_and_statistics_roundtrip(checks, expectation):
check_statistics = {check.name: check.statistics for check in checks}
check_list = schema_statistics.parse_check_statistics(check_statistics)
- assert set(check_list) == set(checks)
+ assert all(
+ c1.name == c2.name and c1.statistics == c2.statistics
+ for c1, c2 in zip(
+ sorted(checks, key=lambda x: x.name),
+ sorted(check_list, key=lambda x: x.name),
+ )
+ )
# pylint: disable=unused-argument
@@ -661,12 +724,20 @@ def test_parse_checks_and_statistics_no_param(extra_registered_checks):
"""
checks = [pa.Check.no_param_check()]
- expectation = {"no_param_check": {}}
+ expectation = {
+ "no_param_check": {
+ "options": {"ignore_na": True, "raise_warning": False}
+ }
+ }
assert schema_statistics.parse_checks(checks) == expectation
check_statistics = {check.name: check.statistics for check in checks}
check_list = schema_statistics.parse_check_statistics(check_statistics)
- assert set(check_list) == set(checks)
-
-# pylint: enable=unused-argument
+ assert all(
+ c1.name == c2.name and c1.statistics == c2.statistics
+ for c1, c2 in zip(
+ sorted(checks, key=lambda x: x.name),
+ sorted(check_list, key=lambda x: x.name),
+ )
+ )
diff --git a/tests/io/test_io.py b/tests/io/test_io.py
index c87302f..d87c2de 100644
--- a/tests/io/test_io.py
+++ b/tests/io/test_io.py
@@ -127,13 +127,24 @@ columns:
dtype: int64
nullable: false
checks:
- greater_than: 0
- less_than: 10
+ greater_than:
+ value: 0
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 10
+ options:
+ raise_warning: false
+ ignore_na: true
in_range:
min_value: 0
max_value: 10
include_min: true
include_max: true
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -144,13 +155,24 @@ columns:
dtype: float64
nullable: false
checks:
- greater_than: -10
- less_than: 20
+ greater_than:
+ value: -10
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 20
+ options:
+ raise_warning: false
+ ignore_na: true
in_range:
min_value: -10
max_value: 20
include_min: true
include_max: true
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -162,13 +184,20 @@ columns:
nullable: false
checks:
isin:
- - foo
- - bar
- - x
- - xy
+ value:
+ - foo
+ - bar
+ - x
+ - xy
+ options:
+ raise_warning: false
+ ignore_na: true
str_length:
min_value: 1
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -179,8 +208,16 @@ columns:
dtype: datetime64[ns]
nullable: false
checks:
- greater_than: '2010-01-01 00:00:00'
- less_than: '2020-01-01 00:00:00'
+ greater_than:
+ value: '2010-01-01 00:00:00'
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: '2020-01-01 00:00:00'
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -191,8 +228,16 @@ columns:
dtype: timedelta64[ns]
nullable: false
checks:
- greater_than: 1000
- less_than: 10000
+ greater_than:
+ value: 1000
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 10000
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -206,6 +251,9 @@ columns:
str_length:
min_value: 1
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: false
@@ -217,10 +265,14 @@ columns:
nullable: false
checks:
isin:
- - foo
- - bar
- - x
- - xy
+ value:
+ - foo
+ - bar
+ - x
+ - xy
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -290,13 +342,20 @@ columns:
nullable: false
checks:
isin:
- - foo
- - bar
- - x
- - xy
+ value:
+ - foo
+ - bar
+ - x
+ - xy
+ options:
+ raise_warning: false
+ ignore_na: true
str_length:
min_value: 1
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
index: null
checks: null
coerce: false
@@ -388,13 +447,24 @@ columns:
dtype: int64
nullable: false
checks:
- greater_than: 0
- less_than: 10
+ greater_than:
+ value: 0
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 10
+ options:
+ raise_warning: false
+ ignore_na: true
in_range:
min_value: 0
max_value: 10
include_min: true
include_max: true
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -405,13 +475,24 @@ columns:
dtype: float64
nullable: false
checks:
- greater_than: -10
- less_than: 20
+ greater_than:
+ value: -10
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 20
+ options:
+ raise_warning: false
+ ignore_na: true
in_range:
min_value: -10
max_value: 20
include_min: true
include_max: true
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -423,13 +504,20 @@ columns:
nullable: false
checks:
isin:
- - foo
- - bar
- - x
- - xy
+ value:
+ - foo
+ - bar
+ - x
+ - xy
+ options:
+ raise_warning: false
+ ignore_na: true
str_length:
min_value: 1
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -440,8 +528,16 @@ columns:
dtype: datetime64[ns]
nullable: false
checks:
- greater_than: '2010-01-01 00:00:00'
- less_than: '2020-01-01 00:00:00'
+ greater_than:
+ value: '2010-01-01 00:00:00'
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: '2020-01-01 00:00:00'
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -452,8 +548,16 @@ columns:
dtype: timedelta64[ns]
nullable: false
checks:
- greater_than: 1000
- less_than: 10000
+ greater_than:
+ value: 1000
+ options:
+ raise_warning: false
+ ignore_na: true
+ less_than:
+ value: 10000
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -467,6 +571,9 @@ columns:
str_length:
min_value: 1
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: false
@@ -478,10 +585,14 @@ columns:
nullable: false
checks:
isin:
- - foo
- - bar
- - x
- - xy
+ value:
+ - foo
+ - bar
+ - x
+ - xy
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: false
required: true
@@ -1138,6 +1249,9 @@ columns:
max_value: 99
include_min: true
include_max: true
+ options:
+ raise_warning: false
+ ignore_na: true
unique: true
coerce: true
required: true
@@ -1148,7 +1262,11 @@ columns:
dtype: {INT_DTYPE}
nullable: true
checks:
- less_than_or_equal_to: 30
+ less_than_or_equal_to:
+ value: 30
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1162,6 +1280,9 @@ columns:
str_length:
min_value: 3
max_value: 80
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1172,7 +1293,11 @@ columns:
dtype: {STR_DTYPE}
nullable: true
checks:
- str_matches: ^\\d{{3}}[A-Z]$
+ str_matches:
+ value: ^\\d{{3}}[A-Z]$
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1186,6 +1311,9 @@ columns:
str_length:
min_value: 3
max_value: null
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1199,6 +1327,9 @@ columns:
str_length:
min_value: null
max_value: 3
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1210,9 +1341,13 @@ columns:
nullable: false
checks:
isin:
- - 1.0
- - 2.0
- - 3.0
+ value:
+ - 1.0
+ - 2.0
+ - 3.0
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
@@ -1233,7 +1368,11 @@ columns:
dtype: {STR_DTYPE}
nullable: true
checks:
- greater_than_or_equal_to: '20201231'
+ greater_than_or_equal_to:
+ value: '20201231'
+ options:
+ raise_warning: false
+ ignore_na: true
unique: false
coerce: true
required: true
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 0.21 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiosignal==1.3.2
alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1704848697227/work
annotated-types @ file:///home/conda/feedstock_root/build_artifacts/annotated-types_1733247046149/work
anyio @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_anyio_1742243108/work
argcomplete @ file:///home/conda/feedstock_root/build_artifacts/argcomplete_1742703822879/work
astroid @ file:///home/conda/feedstock_root/build_artifacts/astroid_1695739483793/work
asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733250440834/work
asv @ file:///home/conda/feedstock_root/build_artifacts/asv_1725737739405/work
asv_runner @ file:///home/conda/feedstock_root/build_artifacts/asv_runner_1708248797019/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1741918516150/work
babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1738490167835/work
backports.tarfile @ file:///home/conda/feedstock_root/build_artifacts/backports.tarfile_1733325779670/work
beautifulsoup4==4.13.3
black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1742502760723/work
bokeh @ file:///home/conda/feedstock_root/build_artifacts/bokeh_1719324651922/work
branca @ file:///home/conda/feedstock_root/build_artifacts/branca_1734433375112/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work
cachetools @ file:///home/conda/feedstock_root/build_artifacts/cachetools_1740094013202/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1725571112467/work
cfgv @ file:///home/conda/feedstock_root/build_artifacts/cfgv_1734267080977/work
chardet @ file:///home/conda/feedstock_root/build_artifacts/chardet_1741797914774/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work
click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work
cloudpickle @ file:///home/conda/feedstock_root/build_artifacts/cloudpickle_1736947526808/work
cmarkgfm @ file:///home/conda/feedstock_root/build_artifacts/cmarkgfm_1732193239380/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work
colorlog @ file:///home/conda/feedstock_root/build_artifacts/colorlog_1733258404285/work
comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1733502965406/work
commonmark==0.9.1
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1727293517607/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1743381224823/work
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography-split_1740893557677/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1733332471406/work
cytoolz @ file:///home/conda/feedstock_root/build_artifacts/cytoolz_1734107207199/work
dask @ file:///home/conda/feedstock_root/build_artifacts/dask-core_1722976580461/work
dask-expr @ file:///home/conda/feedstock_root/build_artifacts/dask-expr_1722982607046/work
debugpy @ file:///home/conda/feedstock_root/build_artifacts/debugpy_1741148409996/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1740384970518/work
dependency-groups @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_dependency-groups_1739286706/work
dill @ file:///home/conda/feedstock_root/build_artifacts/dill_1733249551891/work
distlib @ file:///home/conda/feedstock_root/build_artifacts/distlib_1733238395481/work
distributed @ file:///home/conda/feedstock_root/build_artifacts/distributed_1722982528621/work
dnspython @ file:///home/conda/feedstock_root/build_artifacts/dnspython_1733256735222/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1733217766141/work
email_validator @ file:///home/conda/feedstock_root/build_artifacts/email-validator-meta_1733300719943/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
execnet @ file:///home/conda/feedstock_root/build_artifacts/execnet_1733230988954/work
executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1733569351617/work
fastapi @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_fastapi_1742822207/work
fastapi-cli @ file:///home/conda/feedstock_root/build_artifacts/fastapi-cli_1734302308128/work
fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1733235979760/work/dist
filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1741969488311/work
folium @ file:///home/conda/feedstock_root/build_artifacts/folium_1740766619747/work
fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1738940303262/work
frictionless @ file:///home/conda/feedstock_root/build_artifacts/frictionless_1661419509600/work
frozenlist==1.5.0
fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1743361113926/work
furo==2024.8.6
future @ file:///home/conda/feedstock_root/build_artifacts/future_1738926421307/work
geopandas @ file:///home/conda/feedstock_root/build_artifacts/geopandas_1734346029138/work
googleapis-common-protos==1.69.2
greenlet @ file:///home/conda/feedstock_root/build_artifacts/greenlet_1734532792566/work
grpcio==1.71.0
grpcio-status==1.71.0
h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1733327467879/work
h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1738578511449/work
hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1737618293087/work
httpcore @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_httpcore_1731707562/work
httptools @ file:///home/conda/feedstock_root/build_artifacts/httptools_1732707648856/work
httpx @ file:///home/conda/feedstock_root/build_artifacts/httpx_1733663348460/work
hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1737618333194/work
hypothesis @ file:///home/conda/feedstock_root/build_artifacts/hypothesis_1742900877539/work
id @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_id_1737528654/work
identify @ file:///home/conda/feedstock_root/build_artifacts/identify_1741502659866/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1736252299705/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1719845459717/work
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1701831663892/work
isodate @ file:///home/conda/feedstock_root/build_artifacts/isodate_1733230734792/work
isort @ file:///home/conda/feedstock_root/build_artifacts/isort_1733236185843/work
jaraco.classes @ file:///home/conda/feedstock_root/build_artifacts/jaraco.classes_1733325873251/work
jaraco.context @ file:///home/conda/feedstock_root/build_artifacts/jaraco.context_1733382590553/work
jaraco.functools @ file:///home/conda/feedstock_root/build_artifacts/jaraco.functools_1733746366381/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1733300866624/work
jeepney @ file:///home/conda/feedstock_root/build_artifacts/jeepney_1740828240267/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1733736026804/work
json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1733272076743/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1733472696581/work
jsonschema-specifications @ file:///tmp/tmpk0f344m9/src
jupyter-cache @ file:///home/conda/feedstock_root/build_artifacts/jupyter-cache_1731777098974/work
jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1733440914442/work
jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1727163409502/work
keyring @ file:///home/conda/feedstock_root/build_artifacts/keyring_1735210185992/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1725459266648/work
lazy-object-proxy @ file:///home/conda/feedstock_root/build_artifacts/lazy-object-proxy_1738323167073/work
locket @ file:///home/conda/feedstock_root/build_artifacts/locket_1650660393415/work
lz4 @ file:///home/conda/feedstock_root/build_artifacts/lz4_1733474248677/work
mapclassify @ file:///home/conda/feedstock_root/build_artifacts/mapclassify_1733731066416/work
markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1733250460757/work
marko @ file:///home/conda/feedstock_root/build_artifacts/marko_1734356143108/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work
matplotlib==3.9.4
matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1733416936468/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1733216466933/work
mdit-py-plugins @ file:///home/conda/feedstock_root/build_artifacts/mdit-py-plugins_1733854715505/work
mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1733255585584/work
modin @ file:///home/conda/feedstock_root/build_artifacts/modin-packages_1734983605026/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1736883817510/work
msgpack @ file:///home/conda/feedstock_root/build_artifacts/msgpack-python_1725975012026/work
multimethod @ file:///home/conda/feedstock_root/build_artifacts/multimethod_1735317539007/work
munkres==1.1.4
mypy @ file:///home/conda/feedstock_root/build_artifacts/mypy-split_1714000834601/work
mypy_extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1733230897951/work
myst-nb @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_myst-nb_1739024493/work
myst-parser @ file:///home/conda/feedstock_root/build_artifacts/myst-parser_1714413780344/work
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1734628800805/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1733402752141/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1733325553580/work
networkx @ file:///home/conda/feedstock_root/build_artifacts/networkx_1698504735452/work
nh3 @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_nh3_1741652643/work
nodeenv @ file:///home/conda/feedstock_root/build_artifacts/nodeenv_1734112117269/work
nox @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_nox_1741709922/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1707225342954/work/dist/numpy-1.26.4-cp39-cp39-linux_x86_64.whl#sha256=c799942b5898f6e6c60264d1663a6469a475290e758c654aeeb78e2596463abd
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1715897627815/work
pandas-stubs @ file:///home/conda/feedstock_root/build_artifacts/pandas-stubs_1732655899399/work
-e git+https://github.com/unionai-oss/pandera.git@9667234b4f6c273e1c3e9deac1ef982e3a58ff27#egg=pandera
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1733271261340/work
partd @ file:///home/conda/feedstock_root/build_artifacts/partd_1715026491486/work
pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1733233363808/work
petl @ file:///home/conda/feedstock_root/build_artifacts/petl_1710290930740/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1733301927746/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1733327343728/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1735929703139/work
pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1733344503739/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_platformdirs_1742485085/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work
polars @ file:///home/conda/feedstock_root/build_artifacts/polars_1742841777904/work
pre_commit @ file:///home/conda/feedstock_root/build_artifacts/pre-commit_1742475552107/work
prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1737453357274/work
protobuf @ file:///home/conda/feedstock_root/build_artifacts/protobuf_1741124571372/work/bazel-bin/python/dist/protobuf-5.29.3-cp39-abi3-linux_x86_64.whl#sha256=7a950517418fb10c23170e0132324ba319529b6002f25fbd96534cd9bf72d4d3
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1740663125313/work
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1733302279685/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl#sha256=92c32ff62b5fd8cf325bec5ab90d7be3d2a8ca8c8a3813ff487a8d2002630d1f
pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1733569405015/work
py4j @ file:///home/conda/feedstock_root/build_artifacts/py4j_1660381574436/work
pyarrow==19.0.1
pyarrow-hotfix @ file:///home/conda/feedstock_root/build_artifacts/pyarrow-hotfix_1734380560621/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pycparser_1733195786/work
pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1737761369378/work
pydantic_core @ file:///home/conda/feedstock_root/build_artifacts/pydantic-core_1734571577911/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1736243443484/work
pylint @ file:///home/conda/feedstock_root/build_artifacts/pylint_1682377695563/work
Pympler @ file:///home/conda/feedstock_root/build_artifacts/pympler_1733327099500/work
pyogrio @ file:///home/conda/feedstock_root/build_artifacts/pyogrio_1732013374897/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1743089729650/work
pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1726679693937/work
pyproject-api @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pyproject-api_1737556652/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work
pyspark @ file:///home/conda/feedstock_root/build_artifacts/pyspark_1740719055705/work
pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work
pytest-asyncio @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pytest-asyncio_1742911666/work/dist
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1733223023082/work
pytest-mock==3.14.0
pytest-xdist @ file:///home/conda/feedstock_root/build_artifacts/pytest-xdist_1733240780199/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work
python-dotenv @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_python-dotenv_1742948348/work
python-multipart @ file:///home/conda/feedstock_root/build_artifacts/python-multipart_1734420773152/work
python-slugify @ file:///home/conda/feedstock_root/build_artifacts/python-slugify_1733756245724/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1742920838005/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1741805177758/work
ray==2.44.1
readme_renderer @ file:///home/conda/feedstock_root/build_artifacts/readme_renderer_1734339668149/work
recommonmark @ file:///home/conda/feedstock_root/build_artifacts/recommonmark_1734020769275/work
referencing @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_referencing_1737836872/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work
requests-toolbelt @ file:///home/conda/feedstock_root/build_artifacts/requests-toolbelt_1733734787568/work
rfc3986 @ file:///home/conda/feedstock_root/build_artifacts/rfc3986_1733921695259/work
rich @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rich_1743371105/work/dist
rich-toolkit @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rich-toolkit_1733750834/work
rpds-py @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rpds-py_1743037693/work
scikit-learn @ file:///home/conda/feedstock_root/build_artifacts/scikit-learn_1736496755362/work/dist/scikit_learn-1.6.1-cp39-cp39-linux_x86_64.whl#sha256=e8f978e37bb47e04e1337a63f75697b723d6d25f58e477734555faed033884ba
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy-split_1716470218293/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=e6696cb8683d94467891b7648e068a3970f6bc0a1b3c1aa7f9bc89458eafd2f0
SecretStorage @ file:///home/conda/feedstock_root/build_artifacts/secretstorage_1725915609225/work
shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1741166945909/work
shellingham @ file:///home/conda/feedstock_root/build_artifacts/shellingham_1733300899265/work
simpleeval @ file:///home/conda/feedstock_root/build_artifacts/simpleeval_1698070470799/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work
sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1733244044561/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
sortedcontainers @ file:///home/conda/feedstock_root/build_artifacts/sortedcontainers_1738440353519/work
soupsieve==2.6
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1721487534232/work
sphinx-autodoc-typehints @ file:///home/conda/feedstock_root/build_artifacts/sphinx-autodoc-typehints_1618459348655/work
sphinx-basic-ng==1.0.0b2
sphinx-copybutton @ file:///home/conda/feedstock_root/build_artifacts/sphinx-copybutton_1734572975006/work
sphinx-docsearch==0.1.0
sphinx_design @ file:///home/conda/feedstock_root/build_artifacts/sphinx-design_1734614570224/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1733754101641/work
sphinxcontrib-devhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-devhelp_1733754113405/work
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1733754280555/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1733753744933/work
sphinxcontrib-qthelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-qthelp_1733753408017/work
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1733750479108/work
SQLAlchemy @ file:///home/conda/feedstock_root/build_artifacts/sqlalchemy_1743109707043/work
stack_data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1733569443808/work
starlette @ file:///home/conda/feedstock_root/build_artifacts/starlette_1741450612959/work
stringcase @ file:///home/conda/feedstock_root/build_artifacts/stringcase_1734359593045/work
tabulate @ file:///home/conda/feedstock_root/build_artifacts/tabulate_1733589744265/work
tblib @ file:///home/conda/feedstock_root/build_artifacts/tblib_1733842374544/work
text-unidecode @ file:///home/conda/feedstock_root/build_artifacts/text-unidecode_1733749896243/work
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1741878222898/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work
tomlkit @ file:///home/conda/feedstock_root/build_artifacts/tomlkit_1733230743009/work
toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1733736030883/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1732615921868/work
tox @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_tox_1743166623/work
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1733367359838/work
twine @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_twine_1737553658/work
typeguard==4.4.2
typer==0.15.2
typer-slim==0.15.2
types-click==7.1.8
types-pytz==2025.2.0.20250326
types-PyYAML==6.0.12.20250326
types-requests==2.32.0.20250328
types-setuptools==78.1.0.20250329
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work
typing_inspect @ file:///home/conda/feedstock_root/build_artifacts/typing_inspect_1733845867367/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1742745135198/work
ukkonen @ file:///home/conda/feedstock_root/build_artifacts/ukkonen_1725784039739/work
unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1736692503055/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1734859416348/work
uvicorn @ file:///home/conda/feedstock_root/build_artifacts/uvicorn_1734292939144/work
uvloop @ file:///home/conda/feedstock_root/build_artifacts/uvloop_1730214330131/work
validators @ file:///home/conda/feedstock_root/build_artifacts/validators_1734028829948/work
virtualenv @ file:///home/conda/feedstock_root/build_artifacts/virtualenv_1741337798015/work
watchfiles @ file:///home/conda/feedstock_root/build_artifacts/watchfiles_1736550411223/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1733231326287/work
websockets @ file:///home/conda/feedstock_root/build_artifacts/websockets_1741285452944/work
wrapt @ file:///home/conda/feedstock_root/build_artifacts/wrapt_1736869460534/work
xdoctest @ file:///home/conda/feedstock_root/build_artifacts/xdoctest_1724194343446/work
xyzservices @ file:///home/conda/feedstock_root/build_artifacts/xyzservices_1737234886776/work
zict @ file:///home/conda/feedstock_root/build_artifacts/zict_1733261551178/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work
zstandard==0.23.0
| name: pandera
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- _python_abi3_support=1.0=hd8ed1ab_1
- alabaster=0.7.16=pyhd8ed1ab_0
- annotated-types=0.7.0=pyhd8ed1ab_1
- anyio=4.9.0=pyh29332c3_0
- aom=3.9.1=hac33072_0
- argcomplete=3.6.1=pyhd8ed1ab_0
- astroid=2.15.8=py39hf3d152e_0
- asttokens=3.0.0=pyhd8ed1ab_1
- asv=0.6.4=py39hf88036b_1
- asv_runner=0.2.1=pyhd8ed1ab_0
- attrs=25.3.0=pyh71513ae_0
- aws-c-auth=0.8.6=hd08a7f5_4
- aws-c-cal=0.8.7=h043a21b_0
- aws-c-common=0.12.0=hb9d3cd8_0
- aws-c-compression=0.3.1=h3870646_2
- aws-c-event-stream=0.5.4=h04a3f94_2
- aws-c-http=0.9.4=hb9b18c6_4
- aws-c-io=0.17.0=h3dad3f2_6
- aws-c-mqtt=0.12.2=h108da3e_2
- aws-c-s3=0.7.13=h822ba82_2
- aws-c-sdkutils=0.2.3=h3870646_2
- aws-checksums=0.2.3=h3870646_2
- aws-crt-cpp=0.31.0=h55f77e1_4
- aws-sdk-cpp=1.11.510=h37a5c72_3
- azure-core-cpp=1.14.0=h5cfcd09_0
- azure-identity-cpp=1.10.0=h113e628_0
- azure-storage-blobs-cpp=12.13.0=h3cf044e_1
- azure-storage-common-cpp=12.8.0=h736e048_1
- azure-storage-files-datalake-cpp=12.12.0=ha633028_1
- babel=2.17.0=pyhd8ed1ab_0
- backports=1.0=pyhd8ed1ab_5
- backports.tarfile=1.2.0=pyhd8ed1ab_1
- black=25.1.0=pyha5154f8_0
- blosc=1.21.6=he440d0b_1
- bokeh=3.4.2=pyhd8ed1ab_0
- branca=0.8.1=pyhd8ed1ab_0
- brotli=1.1.0=hb9d3cd8_2
- brotli-bin=1.1.0=hb9d3cd8_2
- brotli-python=1.1.0=py39hf88036b_2
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cachetools=5.5.2=pyhd8ed1ab_0
- certifi=2025.1.31=pyhd8ed1ab_0
- cffi=1.17.1=py39h15c3d72_0
- cfgv=3.3.1=pyhd8ed1ab_1
- chardet=5.2.0=pyhd8ed1ab_3
- charset-normalizer=3.4.1=pyhd8ed1ab_0
- click=8.1.8=pyh707e725_0
- cloudpickle=3.1.1=pyhd8ed1ab_0
- cmarkgfm=2024.11.20=py39h8cd3c5a_0
- colorama=0.4.6=pyhd8ed1ab_1
- colorlog=6.9.0=pyh707e725_1
- comm=0.2.2=pyhd8ed1ab_1
- commonmark=0.9.1=py_0
- contourpy=1.3.0=py39h74842e3_2
- coverage=7.8.0=py39h9399b63_0
- cpython=3.9.21=py39hd8ed1ab_1
- cryptography=44.0.2=py39h7170ec2_0
- cycler=0.12.1=pyhd8ed1ab_1
- cytoolz=1.0.1=py39h8cd3c5a_0
- dask=2024.8.0=pyhd8ed1ab_0
- dask-core=2024.8.0=pyhd8ed1ab_0
- dask-expr=1.1.10=pyhd8ed1ab_0
- dav1d=1.2.1=hd590300_0
- dbus=1.13.6=h5008d03_3
- debugpy=1.8.13=py39hf88036b_0
- decorator=5.2.1=pyhd8ed1ab_0
- dependency-groups=1.3.0=pyh29332c3_0
- dill=0.3.9=pyhd8ed1ab_1
- distlib=0.3.9=pyhd8ed1ab_1
- distributed=2024.8.0=pyhd8ed1ab_0
- dnspython=2.7.0=pyhff2d567_1
- docutils=0.21.2=pyhd8ed1ab_1
- email-validator=2.2.0=pyhd8ed1ab_1
- email_validator=2.2.0=hd8ed1ab_1
- exceptiongroup=1.2.2=pyhd8ed1ab_1
- execnet=2.1.1=pyhd8ed1ab_1
- executing=2.1.0=pyhd8ed1ab_1
- expat=2.6.4=h5888daf_0
- fastapi=0.115.12=pyh29332c3_0
- fastapi-cli=0.0.7=pyhd8ed1ab_0
- filelock=3.18.0=pyhd8ed1ab_0
- folium=0.19.5=pyhd8ed1ab_0
- fonttools=4.56.0=py39h9399b63_0
- freetype=2.13.3=h48d6fc4_0
- freexl=2.0.0=h9dce30a_2
- frictionless=4.40.8=pyh6c4a22f_0
- fsspec=2025.3.1=pyhd8ed1ab_0
- future=1.0.0=pyhd8ed1ab_2
- geopandas=1.0.1=pyhd8ed1ab_3
- geopandas-base=1.0.1=pyha770c72_3
- geos=3.13.1=h97f6797_0
- geotiff=1.7.4=h3551947_0
- gflags=2.2.2=h5888daf_1005
- giflib=5.2.2=hd590300_0
- glog=0.7.1=hbabe93e_0
- greenlet=3.1.1=py39hf88036b_1
- h11=0.14.0=pyhd8ed1ab_1
- h2=4.2.0=pyhd8ed1ab_0
- hpack=4.1.0=pyhd8ed1ab_0
- httpcore=1.0.7=pyh29332c3_1
- httptools=0.6.4=py39h8cd3c5a_0
- httpx=0.28.1=pyhd8ed1ab_0
- hyperframe=6.1.0=pyhd8ed1ab_0
- hypothesis=6.130.4=pyha770c72_0
- icu=75.1=he02047a_0
- id=1.5.0=pyh29332c3_0
- identify=2.6.9=pyhd8ed1ab_0
- idna=3.10=pyhd8ed1ab_1
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=8.6.1=pyha770c72_0
- importlib-resources=6.5.2=pyhd8ed1ab_0
- importlib_metadata=8.6.1=hd8ed1ab_0
- importlib_resources=6.5.2=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_1
- ipykernel=6.29.5=pyh3099207_0
- ipython=8.18.1=pyh707e725_3
- isodate=0.7.2=pyhd8ed1ab_1
- isort=5.13.2=pyhd8ed1ab_1
- jaraco.classes=3.4.0=pyhd8ed1ab_2
- jaraco.context=6.0.1=pyhd8ed1ab_0
- jaraco.functools=4.1.0=pyhd8ed1ab_0
- jedi=0.19.2=pyhd8ed1ab_1
- jeepney=0.9.0=pyhd8ed1ab_0
- jinja2=3.1.6=pyhd8ed1ab_0
- joblib=1.4.2=pyhd8ed1ab_1
- json-c=0.18=h6688a6e_0
- json5=0.10.0=pyhd8ed1ab_1
- jsonschema=4.23.0=pyhd8ed1ab_1
- jsonschema-specifications=2024.10.1=pyhd8ed1ab_1
- jupyter-cache=1.0.1=pyhff2d567_0
- jupyter_client=8.6.3=pyhd8ed1ab_1
- jupyter_core=5.7.2=pyh31011fe_1
- keyring=25.6.0=pyha804496_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.7=py39h74842e3_0
- krb5=1.21.3=h659f571_0
- lazy-object-proxy=1.10.0=py39h8cd3c5a_2
- lcms2=2.17=h717163a_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- libabseil=20250127.1=cxx17_hbbce691_0
- libarchive=3.7.7=h4585015_3
- libarrow=19.0.1=h120c447_5_cpu
- libarrow-acero=19.0.1=hcb10f89_5_cpu
- libarrow-dataset=19.0.1=hcb10f89_5_cpu
- libarrow-substrait=19.0.1=h1bed206_5_cpu
- libavif16=1.2.1=hbb36593_2
- libblas=3.9.0=31_h59b9bed_openblas
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcblas=3.9.0=31_he106b2a_openblas
- libcrc32c=1.1.2=h9c3ff4c_0
- libcurl=8.12.1=h332b0f4_0
- libde265=1.0.15=h00ab1b0_0
- libdeflate=1.23=h4ddbbb0_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.12=hf998b51_1
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgdal-core=3.10.2=h05269f4_1
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.84.0=h2ff4ddf_0
- libgomp=14.2.0=h767d61c_2
- libgoogle-cloud=2.36.0=hc4361e1_1
- libgoogle-cloud-storage=2.36.0=h0121fbd_1
- libgrpc=1.71.0=he753a82_0
- libheif=1.19.7=gpl_hc18d805_100
- libiconv=1.18=h4ce23a2_1
- libjpeg-turbo=3.0.0=hd590300_1
- libkml=1.3.0=hf539b9f_1021
- liblapack=3.9.0=31_h7ac8fdf_openblas
- liblzma=5.6.4=hb9d3cd8_0
- libnghttp2=1.64.0=h161d5f1_0
- libnsl=2.0.1=hd590300_0
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopentelemetry-cpp=1.19.0=hd1b1c89_0
- libopentelemetry-cpp-headers=1.19.0=ha770c72_0
- libparquet=19.0.1=h081d1f1_5_cpu
- libpng=1.6.47=h943b412_0
- libprotobuf=5.29.3=h501fc15_0
- libre2-11=2024.07.02=hba17884_3
- librttopo=1.1.0=hd718a1a_18
- libsodium=1.0.20=h4ab18f5_0
- libspatialite=5.1.0=h366e088_13
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.1=hf672d98_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libthrift=0.21.0=h0e7cc3e_0
- libtiff=4.7.0=hd9ff511_3
- libutf8proc=2.10.0=h4c51ac1_0
- libuuid=2.38.1=h0b41bf4_0
- libuv=1.50.0=hb9d3cd8_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.17.0=h8a09558_0
- libxcrypt=4.4.36=hd590300_1
- libxml2=2.13.7=h8d12d68_0
- libzlib=1.3.1=hb9d3cd8_2
- locket=1.0.0=pyhd8ed1ab_0
- lz4=4.3.3=py39h92207c2_2
- lz4-c=1.10.0=h5888daf_1
- lzo=2.10=hd590300_1001
- mapclassify=2.8.1=pyhd8ed1ab_1
- markdown-it-py=3.0.0=pyhd8ed1ab_1
- marko=2.1.2=pyhd8ed1ab_1
- markupsafe=3.0.2=py39h9399b63_1
- matplotlib-base=3.9.4=py39h16632d1_0
- matplotlib-inline=0.1.7=pyhd8ed1ab_1
- mccabe=0.7.0=pyhd8ed1ab_1
- mdit-py-plugins=0.4.2=pyhd8ed1ab_1
- mdurl=0.1.2=pyhd8ed1ab_1
- minizip=4.0.7=h05a5f5f_3
- modin=0.32.0=hd8ed1ab_2
- modin-core=0.32.0=pyhd8ed1ab_2
- modin-dask=0.32.0=hd8ed1ab_2
- more-itertools=10.6.0=pyhd8ed1ab_0
- msgpack-python=1.1.0=py39h74842e3_0
- multimethod=2.0=pyhd8ed1ab_0
- munkres=1.1.4=pyh9f0ad1d_0
- mypy=1.10.0=py39hd3abc70_0
- mypy_extensions=1.0.0=pyha770c72_1
- myst-nb=1.2.0=pyh29332c3_0
- myst-parser=3.0.1=pyhd8ed1ab_0
- nbclient=0.10.2=pyhd8ed1ab_0
- nbformat=5.10.4=pyhd8ed1ab_1
- ncurses=6.5=h2d0b736_3
- nest-asyncio=1.6.0=pyhd8ed1ab_1
- networkx=3.2.1=pyhd8ed1ab_0
- nh3=0.2.21=py39h77e2912_1
- nlohmann_json=3.11.3=he02047a_1
- nodeenv=1.9.1=pyhd8ed1ab_1
- nox=2025.2.9=pyh29332c3_1
- numpy=1.26.4=py39h474f0d3_0
- openjpeg=2.5.3=h5fbd93e_0
- openssl=3.4.1=h7b32b05_0
- orc=2.1.1=h17f744e_1
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.2.2=py39hfc16268_1
- pandas-stubs=2.2.3.241126=pyhd8ed1ab_0
- parso=0.8.4=pyhd8ed1ab_1
- partd=1.4.2=pyhd8ed1ab_0
- pathspec=0.12.1=pyhd8ed1ab_1
- pcre2=10.44=hba22ea6_2
- petl=1.7.15=pyhd8ed1ab_0
- pexpect=4.9.0=pyhd8ed1ab_1
- pickleshare=0.7.5=pyhd8ed1ab_1004
- pillow=11.1.0=py39h15c0740_0
- pip=25.0.1=pyh8b19718_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_2
- platformdirs=4.3.7=pyh29332c3_0
- pluggy=1.5.0=pyhd8ed1ab_1
- polars=1.26.0=py39h0cd0d40_0
- pre-commit=4.2.0=pyha770c72_0
- pre_commit=4.2.0=hd8ed1ab_0
- proj=9.5.1=h0054346_0
- prometheus-cpp=1.3.0=ha5d0236_0
- prompt-toolkit=3.0.50=pyha770c72_0
- protobuf=5.29.3=py39hbeaf701_0
- psutil=7.0.0=py39h8cd3c5a_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd8ed1ab_1
- pure_eval=0.2.3=pyhd8ed1ab_1
- py4j=0.10.9.7=pyhd8ed1ab_0
- pyarrow=19.0.1=py39hf3d152e_0
- pyarrow-core=19.0.1=py39h6117c73_0_cpu
- pyarrow-hotfix=0.6=pyhd8ed1ab_1
- pycparser=2.22=pyh29332c3_1
- pydantic=2.10.6=pyh3cfb1c2_0
- pydantic-core=2.27.2=py39he612d8f_0
- pygments=2.19.1=pyhd8ed1ab_0
- pylint=2.17.3=pyhd8ed1ab_0
- pympler=1.1=pyhd8ed1ab_1
- pyogrio=0.10.0=py39h4bd6204_1
- pyparsing=3.2.3=pyhd8ed1ab_1
- pyproj=3.6.1=py39h306d449_10
- pyproject-api=1.9.0=pyh29332c3_0
- pysocks=1.7.1=pyha55dd90_7
- pyspark=3.5.5=pyhd8ed1ab_0
- pytest=8.3.5=pyhd8ed1ab_0
- pytest-asyncio=0.26.0=pyh29332c3_0
- pytest-cov=6.0.0=pyhd8ed1ab_1
- pytest-xdist=3.6.1=pyhd8ed1ab_1
- python=3.9.21=h9c0c6dc_1_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-dotenv=1.1.0=pyh29332c3_1
- python-fastjsonschema=2.21.1=pyhd8ed1ab_0
- python-gil=3.9.21=hd8ed1ab_1
- python-multipart=0.0.20=pyhff2d567_0
- python-slugify=8.0.4=pyhd8ed1ab_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.9=5_cp39
- pytz=2025.2=pyhd8ed1ab_0
- pyyaml=6.0.2=py39h9399b63_2
- pyzmq=26.3.0=py39h4e4fb57_0
- qhull=2020.2=h434a139_5
- rav1e=0.6.6=he8a937b_2
- re2=2024.07.02=h9925aae_3
- readline=8.2=h8c095d6_2
- readme_renderer=44.0=pyhd8ed1ab_1
- recommonmark=0.7.1=pyhd8ed1ab_1
- referencing=0.36.2=pyh29332c3_0
- requests=2.32.3=pyhd8ed1ab_1
- requests-toolbelt=1.0.0=pyhd8ed1ab_1
- rfc3986=2.0.0=pyhd8ed1ab_1
- rich=14.0.0=pyh29332c3_0
- rich-toolkit=0.11.3=pyh29332c3_0
- rpds-py=0.24.0=py39h3506688_0
- s2n=1.5.14=h6c98b2b_0
- scikit-learn=1.6.1=py39h4b7350c_0
- scipy=1.13.1=py39haf93ffa_0
- secretstorage=3.3.3=py39hf3d152e_3
- setuptools=75.8.2=pyhff2d567_0
- shapely=2.0.7=py39h322cc2b_1
- shellingham=1.5.4=pyhd8ed1ab_1
- simpleeval=0.9.13=pyhd8ed1ab_1
- six=1.17.0=pyhd8ed1ab_0
- snappy=1.2.1=h8bd8927_1
- sniffio=1.3.1=pyhd8ed1ab_1
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- sortedcontainers=2.4.0=pyhd8ed1ab_1
- sphinx=7.4.7=pyhd8ed1ab_0
- sphinx-autodoc-typehints=1.12.0=pyhd8ed1ab_0
- sphinx-copybutton=0.5.2=pyhd8ed1ab_1
- sphinx-design=0.6.1=pyhd8ed1ab_2
- sphinxcontrib-applehelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-devhelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-htmlhelp=2.1.0=pyhd8ed1ab_1
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_1
- sphinxcontrib-qthelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-serializinghtml=1.1.10=pyhd8ed1ab_1
- sqlalchemy=2.0.40=py39h8cd3c5a_0
- sqlite=3.49.1=h9eae976_2
- stack_data=0.6.3=pyhd8ed1ab_1
- starlette=0.46.1=pyha770c72_0
- stringcase=1.2.0=pyhd8ed1ab_2
- svt-av1=3.0.2=h5888daf_0
- tabulate=0.9.0=pyhd8ed1ab_2
- tblib=3.0.0=pyhd8ed1ab_1
- text-unidecode=1.3=pyhd8ed1ab_2
- threadpoolctl=3.6.0=pyhecae5ae_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_1
- tomli=2.2.1=pyhd8ed1ab_1
- tomlkit=0.13.2=pyha770c72_1
- toolz=1.0.0=pyhd8ed1ab_1
- tornado=6.4.2=py39h8cd3c5a_0
- tox=4.25.0=pyh29332c3_1
- traitlets=5.14.3=pyhd8ed1ab_1
- twine=6.1.0=pyh29332c3_0
- typer=0.15.2=pyhff008b6_0
- typer-slim=0.15.2=pyh29332c3_0
- typer-slim-standard=0.15.2=h801b22e_0
- typing-extensions=4.13.0=h9fa5a19_1
- typing_extensions=4.13.0=pyh29332c3_1
- typing_inspect=0.9.0=pyhd8ed1ab_1
- tzdata=2025b=h78e105d_0
- ukkonen=1.0.1=py39h74842e3_5
- unicodedata2=16.0.0=py39h8cd3c5a_0
- uriparser=0.9.8=hac33072_0
- urllib3=2.3.0=pyhd8ed1ab_0
- uvicorn=0.34.0=pyh31011fe_0
- uvicorn-standard=0.34.0=h31011fe_0
- uvloop=0.21.0=py39h8cd3c5a_1
- validators=0.34.0=pyhd8ed1ab_1
- virtualenv=20.29.3=pyhd8ed1ab_0
- watchfiles=1.0.4=py39he612d8f_0
- wcwidth=0.2.13=pyhd8ed1ab_1
- websockets=15.0.1=py39h8cd3c5a_0
- wheel=0.45.1=pyhd8ed1ab_1
- wrapt=1.17.2=py39h8cd3c5a_0
- x265=3.5=h924138e_3
- xdoctest=1.2.0=pyhd8ed1ab_0
- xerces-c=3.2.5=h988505b_2
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xyzservices=2025.1.0=pyhd8ed1ab_0
- yaml=0.2.5=h7f98852_2
- zeromq=4.3.5=h3b0a872_7
- zict=3.0.0=pyhd8ed1ab_1
- zipp=3.21.0=pyhd8ed1ab_1
- zlib=1.3.1=hb9d3cd8_2
- zstandard=0.23.0=py39h8cd3c5a_1
- zstd=1.5.7=hb8e6e7a_2
- pip:
- aiosignal==1.3.2
- beautifulsoup4==4.13.3
- frozenlist==1.5.0
- furo==2024.8.6
- googleapis-common-protos==1.69.2
- grpcio==1.71.0
- grpcio-status==1.71.0
- pandera==0.0.0+dev0
- pytest-mock==3.14.0
- ray==2.44.1
- soupsieve==2.6
- sphinx-basic-ng==1.0.0b2
- sphinx-docsearch==0.1.0
- typeguard==4.4.2
- types-click==7.1.8
- types-pytz==2025.2.0.20250326
- types-pyyaml==6.0.12.20250326
- types-requests==2.32.0.20250328
- types-setuptools==78.1.0.20250329
prefix: /opt/conda/envs/pandera
| [
"tests/core/test_schema_statistics.py::test_get_dataframe_schema_statistics",
"tests/core/test_schema_statistics.py::test_get_series_schema_statistics",
"tests/core/test_schema_statistics.py::test_get_index_schema_statistics[index_schema_component0-expectation0]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks0-expectation0]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks1-expectation1]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks2-expectation2]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks3-expectation3]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks4-expectation4]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks5-expectation5]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks6-expectation6]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks7-expectation7]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks8-expectation8]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks9-expectation9]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks10-expectation10]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks11-expectation11]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_no_param",
"tests/io/test_io.py::test_to_yaml",
"tests/io/test_io.py::test_from_yaml[\\nschema_type:"
] | [
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UINT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PythonTypedDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[FLOAT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Int64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Float16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PythonDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Complex64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PythonList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowFloat32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowTimestamp]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowMap]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowBool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Int8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Int32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Complex256]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowFloat64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowStruct]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowDate64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowTime64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowUInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[INT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UINT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[INT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Decimal]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Sparse]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Int16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Float64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UINT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PythonTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UINT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[FLOAT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[UInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Date]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowDecimal128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowUInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Complex128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowNull]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[BOOL]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Float128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Timedelta64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PythonNamedTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Float32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowUInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowTime32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowUInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[INT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowFloat16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[DateTime]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowDate32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[INT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowDuration]",
"tests/io/test_io.py::test_from_yaml_retains_ordered_keyword[True-test_data0-SchemaError]",
"tests/io/test_io.py::test_from_yaml_retains_ordered_keyword[True-test_data1-expected1]",
"tests/io/test_io.py::test_from_yaml_retains_ordered_keyword[False-test_data2-expected2]",
"tests/io/test_io.py::test_from_yaml_retains_ordered_keyword[False-test_data3-expected3]",
"tests/io/test_io.py::test_to_yaml_registered_dataframe_check",
"tests/io/test_io.py::test_to_yaml_retains_ordered_keyword[True-test_data0-SchemaError]",
"tests/io/test_io.py::test_to_yaml_retains_ordered_keyword[True-test_data1-expected1]",
"tests/io/test_io.py::test_to_yaml_retains_ordered_keyword[False-test_data2-expected2]",
"tests/io/test_io.py::test_to_yaml_retains_ordered_keyword[False-test_data3-expected3]",
"tests/io/test_io.py::test_frictionless_schema_parses_correctly[frictionless_schema0]",
"tests/io/test_io.py::test_frictionless_schema_parses_correctly[frictionless_schema1]"
] | [
"tests/core/test_pandas_engine.py::test_pandas_data_type[UINT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowDictionary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PythonTypedDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Category]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PydanticModel]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowLargeString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[FLOAT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Int64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Float16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PythonDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Complex64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PythonList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowFloat32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowTimestamp]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowMap]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowBool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Object]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Int8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Int32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Period]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Complex256]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowFloat64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowStruct]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowDate64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowTime64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowUInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[INT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UINT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[INT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Decimal]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Sparse]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Int16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Float64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UINT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[STRING]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PythonTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UINT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowLargeBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[FLOAT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Bool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[UInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Date]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowDecimal128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowUInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Complex128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Interval]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowNull]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[BOOL]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Float128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Timedelta64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[PythonNamedTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[Float32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowUInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowTime32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowUInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[INT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowFloat16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[DateTime]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowDate32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[INT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[NpString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type[ArrowDuration]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowDictionary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Category]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[PydanticModel]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowLargeString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Object]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Period]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[STRING]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[ArrowLargeBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Bool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[Interval]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_coerce[NpString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UINT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowDictionary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PythonTypedDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Category]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PydanticModel]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowLargeString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[FLOAT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Int64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Float16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PythonDict]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Complex64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PythonList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowFloat32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowTimestamp]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowMap]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowBool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Object]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Int8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Int32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Period]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Complex256]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowFloat64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowStruct]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowDate64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowTime64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowUInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[INT8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UINT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[INT32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Decimal]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Sparse]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Int16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Float64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UINT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[STRING]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PythonTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowList]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UINT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowLargeBinary]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[FLOAT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Bool]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[UInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Date]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowDecimal128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowInt16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowUInt64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Complex128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Interval]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowNull]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[BOOL]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Float128]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Timedelta64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[PythonNamedTuple]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[Float32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowUInt8]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowTime32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowUInt32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[INT16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowFloat16]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[DateTime]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowDate32]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[INT64]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[NpString]",
"tests/core/test_pandas_engine.py::test_pandas_data_type_check[ArrowDuration]",
"tests/core/test_pandas_engine.py::test_pandas_category_dtype",
"tests/core/test_pandas_engine.py::test_pandas_category_dtype_error",
"tests/core/test_pandas_engine.py::test_pandas_boolean_native_type",
"tests/core/test_pandas_engine.py::test_pandas_boolean_native_type_error",
"tests/core/test_pandas_engine.py::test_pandas_datetimetz_dtype[True]",
"tests/core/test_pandas_engine.py::test_pandas_datetimetz_dtype[False]",
"tests/core/test_pandas_engine.py::test_pandas_date_coerce_dtype[True]",
"tests/core/test_pandas_engine.py::test_pandas_date_coerce_dtype[False]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data0-dtype0]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data1-dtype1]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data2-dtype2]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data3-null]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data4-date32]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data5-date64]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data6-dtype6]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data7-dtype7]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data8-dtype8]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data9-dtype9]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data10-dtype10]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data11-dtype11]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data12-dtype12]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data13-dtype13]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype[data14-dtype14]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data0-dtype0]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data1-dtype1]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data2-dtype2]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data3-null]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data4-date32]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data5-date64]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data6-dtype6]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data7-dtype7]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data8-dtype8]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data9-dtype9]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data10-dtype10]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data11-dtype11]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data12-dtype12]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data13-dtype13]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data14-dtype14]",
"tests/core/test_pandas_engine.py::test_pandas_arrow_dtype_error[data15-dtype15]",
"tests/core/test_schema_statistics.py::test_infer_dataframe_statistics[False-False]",
"tests/core/test_schema_statistics.py::test_infer_dataframe_statistics[True-False]",
"tests/core/test_schema_statistics.py::test_infer_dataframe_statistics[False-True]",
"tests/core/test_schema_statistics.py::test_infer_dataframe_statistics[True-True]",
"tests/core/test_schema_statistics.py::test_parse_check_statistics[check_stats0-expectation0]",
"tests/core/test_schema_statistics.py::test_parse_check_statistics[check_stats1-expectation1]",
"tests/core/test_schema_statistics.py::test_parse_check_statistics[check_stats2-expectation2]",
"tests/core/test_schema_statistics.py::test_parse_check_statistics[check_stats3-expectation3]",
"tests/core/test_schema_statistics.py::test_parse_check_statistics[check_stats4-None]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series0-expectation0]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series1-expectation1]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series2-expectation2]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series3-expectation3]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series4-expectation4]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series5-expectation5]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series6-expectation6]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series7-expectation7]",
"tests/core/test_schema_statistics.py::test_infer_series_schema_statistics[series8-expectation8]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series0-expectation0]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series1-expectation1]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series2-expectation2]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series3-expectation3]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series4-expectation4]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series5-expectation5]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series6-expectation6]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series7-expectation7]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series8-expectation8]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series9-expectation9]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series10-expectation10]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[0-series11-expectation11]",
"tests/core/test_schema_statistics.py::test_infer_nullable_series_schema_statistics[2-series12-expectation12]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values0-dtype0]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values1-dtype1]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values2-dtype2]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values3-dtype3]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values4-dtype4]",
"tests/core/test_schema_statistics.py::test_empty_series_schema_statistics[null_values5-dtype5]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index0-expectation0]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index1-expectation1]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index2-expectation2]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index3-expectation3]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[1-UserWarning]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[foo-UserWarning]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index6-UserWarning]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index7-UserWarning]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index8-UserWarning]",
"tests/core/test_schema_statistics.py::test_infer_index_statistics[index9-UserWarning]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks12-ValueError]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks13-ValueError]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks14-ValueError]",
"tests/core/test_schema_statistics.py::test_parse_checks_and_statistics_roundtrip[checks15-ValueError]",
"tests/io/test_io.py::test_inferred_schema_io",
"tests/io/test_io.py::test_from_yaml_unregistered_checks",
"tests/io/test_io.py::test_from_yaml_load_required_fields",
"tests/io/test_io.py::test_io_yaml_file_obj",
"tests/io/test_io.py::test_io_yaml[single]",
"tests/io/test_io.py::test_io_yaml[multi]",
"tests/io/test_io.py::test_io_yaml[None]",
"tests/io/test_io.py::test_io_json[single]",
"tests/io/test_io.py::test_io_json[multi]",
"tests/io/test_io.py::test_io_json[None]",
"tests/io/test_io.py::test_to_script[single]",
"tests/io/test_io.py::test_to_script[multi]",
"tests/io/test_io.py::test_to_script[None]",
"tests/io/test_io.py::test_to_script_lambda_check",
"tests/io/test_io.py::test_to_yaml_lambda_check",
"tests/io/test_io.py::test_format_checks_warning",
"tests/io/test_io.py::test_to_yaml_custom_dataframe_check",
"tests/io/test_io.py::test_to_yaml_bugfix_warn_unregistered_global_checks",
"tests/io/test_io.py::test_serialize_deserialize_custom_datetime_checks",
"tests/io/test_io.py::test_frictionless_schema_primary_key[frictionless_schema0]",
"tests/io/test_io.py::test_frictionless_schema_primary_key[frictionless_schema1]"
] | [] | MIT License | 20,139 | 2,343 | [
"pandera/engines/pandas_engine.py",
"pandera/engines/pyarrow_engine.py",
"pandera/io/pandas_io.py",
"pandera/schema_statistics/pandas.py"
] |
fzakaria__nix-auto-follow-21 | a76c20cd36fb2b6fa4a58be4c5edf4ae6e143648 | 2024-11-03 03:02:22 | a76c20cd36fb2b6fa4a58be4c5edf4ae6e143648 | diff --git a/src/nix_auto_follow/cli.py b/src/nix_auto_follow/cli.py
index d07da12..7a353cb 100644
--- a/src/nix_auto_follow/cli.py
+++ b/src/nix_auto_follow/cli.py
@@ -3,6 +3,7 @@ import json
import sys
from dataclasses import dataclass, field
from typing import Any, TextIO
+from urllib.parse import urlencode
@dataclass
@@ -48,6 +49,17 @@ class Node:
rev_or_ref = next((x for x in [rev, ref] if x), "")
match original["type"]:
+ case "git":
+ base_url = f"git+{original['url']}"
+ filtered_params = {
+ k: (1 if v is True else 0 if v is False else v)
+ for k, v in original.items()
+ if k in ["rev", "ref", "submodules", "shallow"]
+ and v not in (None, "")
+ }
+ query_string = urlencode(filtered_params)
+ full_url = f"{base_url}?{query_string}" if query_string else base_url
+ return full_url
case "github":
return f"github:{original['owner']}/{original['repo']}{rev_or_ref}"
case "gitlab":
| Support git flake type
When checking an input like ``nixpkgs.url = "git+https://github.com/SuperSandro2000/nixpkgs.git?shallow=1&ref=nixos-unstable";`` the following error occurs:
```
Traceback (most recent call last):
File "/nix/store/bw8y4yyjwlz01gvi50q2zzaaqwpg0iig-nix-auto-follow/bin/.auto-follow-wrapped", line 9, in <module>
sys.exit(start())
^^^^^^^
File "/nix/store/bw8y4yyjwlz01gvi50q2zzaaqwpg0iig-nix-auto-follow/lib/python3.11/site-packages/nix_auto_follow/cli.py", line 207, in start
if not check_lock_file(LockFile.from_dict(flake_lock_json)):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/nix/store/bw8y4yyjwlz01gvi50q2zzaaqwpg0iig-nix-auto-follow/lib/python3.11/site-packages/nix_auto_follow/cli.py", line 116, in check_lock_file
f"Please add '{key}.url = \"{flake_lock.nodes[ref].get_url()}\"' or '{other_key}.url = \"{flake_lock.nodes[other_ref].get_url()}'" # noqa: E501
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/nix/store/bw8y4yyjwlz01gvi50q2zzaaqwpg0iig-nix-auto-follow/lib/python3.11/site-packages/nix_auto_follow/cli.py", line 60, in get_url
raise ValueError(f"Unknown type {original['type']}")
ValueError: Unknown type git
``` | fzakaria/nix-auto-follow | diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1951b08..9ed657f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -67,6 +67,54 @@ from nix_auto_follow.cli import (
),
"nixpkgs/nixos-unstable/23.11",
),
+ (
+ Node.from_dict(
+ {
+ "original": {
+ "type": "git",
+ "url": "https://github.com/kaeeraa/ayugram-desktop",
+ }
+ }
+ ),
+ "git+https://github.com/kaeeraa/ayugram-desktop",
+ ),
+ (
+ Node.from_dict(
+ {
+ "original": {
+ "type": "git",
+ "submodules": True,
+ "url": "https://github.com/kaeeraa/ayugram-desktop",
+ }
+ }
+ ),
+ "git+https://github.com/kaeeraa/ayugram-desktop?submodules=1",
+ ),
+ (
+ Node.from_dict(
+ {
+ "original": {
+ "type": "git",
+ "shallow": True,
+ "url": "ssh://[email protected]/mslxl/scripts.git",
+ }
+ }
+ ),
+ "git+ssh://[email protected]/mslxl/scripts.git?shallow=1",
+ ),
+ (
+ Node.from_dict(
+ {
+ "original": {
+ "type": "git",
+ "ref": "main",
+ "shallow": True,
+ "url": "ssh://[email protected]/akibahmed/sops-secrects.git",
+ }
+ }
+ ),
+ "git+ssh://[email protected]/akibahmed/sops-secrects.git?ref=main&shallow=1",
+ ),
],
)
def test_get_url_for_node(node: Node, expected_url: str) -> None:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | black==25.1.0
click==8.1.8
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
flake8-print==5.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
mccabe==0.7.0
mypy==1.15.0
mypy-extensions==1.0.0
-e git+https://github.com/fzakaria/nix-auto-follow.git@a76c20cd36fb2b6fa4a58be4c5edf4ae6e143648#egg=nix_auto_follow
packaging @ file:///croot/packaging_1734472117206/work
pathspec==0.12.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.2
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: nix-auto-follow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py310h06a4308_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- pytest=8.3.4=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py310h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- black==25.1.0
- click==8.1.8
- coverage==7.8.0
- flake8==7.2.0
- flake8-print==5.0.0
- isort==6.0.1
- mccabe==0.7.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nix-auto-follow==0.1.dev32+ga76c20c
- pathspec==0.12.1
- platformdirs==4.3.7
- pycodestyle==2.13.0
- pyflakes==3.3.2
- typing-extensions==4.13.0
prefix: /opt/conda/envs/nix-auto-follow
| [
"tests/test_cli.py::test_get_url_for_node[node6-git+https://github.com/kaeeraa/ayugram-desktop]",
"tests/test_cli.py::test_get_url_for_node[node7-git+https://github.com/kaeeraa/ayugram-desktop?submodules=1]",
"tests/test_cli.py::test_get_url_for_node[node8-git+ssh://[email protected]/mslxl/scripts.git?shallow=1]",
"tests/test_cli.py::test_get_url_for_node[node9-git+ssh://[email protected]/akibahmed/sops-secrects.git?ref=main&shallow=1]"
] | [] | [
"tests/test_cli.py::test_get_url_for_node[node0-github:nixos/nixpkgs/nixos-24.05]",
"tests/test_cli.py::test_get_url_for_node[node1-github:nixos/nixpkgs]",
"tests/test_cli.py::test_get_url_for_node[node2-nixpkgs]",
"tests/test_cli.py::test_get_url_for_node[node3-nixpkgs]",
"tests/test_cli.py::test_get_url_for_node[node4-nixpkgs/nixos-unstable]",
"tests/test_cli.py::test_get_url_for_node[node5-nixpkgs/nixos-unstable/23.11]",
"tests/test_cli.py::test_simple_follow_flake",
"tests/test_cli.py::test_simple_root_has_follow_flake",
"tests/test_cli.py::test_full_start",
"tests/test_cli.py::test_check_lock_file_success[tests/fixtures/has_follow.json]",
"tests/test_cli.py::test_check_lock_file_success[tests/fixtures/root_has_follow.json]",
"tests/test_cli.py::test_check_lock_file_fail",
"tests/test_cli.py::test_do_not_include_empty_inputs",
"tests/test_cli.py::test_top_level_keys_sorted",
"tests/test_cli.py::test_node_keys_sorted"
] | [] | MIT License | 20,140 | 305 | [
"src/nix_auto_follow/cli.py"
] |
|
scikit-rf__scikit-rf-1195 | ed9e7ac75b7911763c437e8e0499a7211fbfc418 | 2024-11-03 22:59:00 | 875e0861e4e5132db42f18f362da4cec5898f3c8 | diff --git a/skrf/network.py b/skrf/network.py
index 4354e1bd..7fb15a25 100644
--- a/skrf/network.py
+++ b/skrf/network.py
@@ -2905,6 +2905,7 @@ class Network:
#Not supported by rational_interp
is_rational = True
else:
+ kwargs["kind"] = kind if kind is not None else "linear"
f_interp = interp1d
# interpret input
| Interpolate method appears to return linear interpolation of network regardless of "kind" parameter
I have been using the network 'interpolation' method for over a year. When I most recently ran my code, the interpolation result appears to return a linear interpolation of the network S parameters regardless of what value is passed to the 'kind' parameter.
Thank you for looking into this.
Erik | scikit-rf/scikit-rf | diff --git a/skrf/tests/test_network.py b/skrf/tests/test_network.py
index 5b60f2c0..fd117fcb 100644
--- a/skrf/tests/test_network.py
+++ b/skrf/tests/test_network.py
@@ -1461,11 +1461,17 @@ class NetworkTestCase(unittest.TestCase):
self.assertTrue( ((a+[1+1j,2+2j]).s == np.array([[[2+3j]],[[5+6j]]])).all())
- def test_interpolate(self):
- a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=1, f_unit="ghz")
- freq = rf.F.from_f(np.linspace(1,2,4), unit='ghz')
- b = a.interpolate(freq)
- # TODO: numerically test for correct interpolation
+ def test_interpolate_linear(self):
+ net = rf.Network(f=[0, 1, 3, 4], s=[0,1,9,16], f_unit="Hz")
+
+ interp = net.interpolate(rf.Frequency(0, 4, 5, unit="Hz"), kind="linear")
+ assert np.allclose(interp.s[2], 5.0)
+
+ def test_interpolate_cubic(self):
+ net = rf.Network(f=[0, 1, 3, 4], s=[0,1,9,16], f_unit="Hz")
+
+ interp = net.interpolate(rf.Frequency(0, 4, 5, unit="Hz"), kind="cubic")
+ assert np.allclose(interp.s[2], 4.0)
def test_interpolate_rational(self):
a = rf.N(f=np.linspace(1,2,5),s=np.linspace(0,1,5)*(1+1j),z0=1, f_unit="ghz")
@@ -1479,21 +1485,6 @@ class NetworkTestCase(unittest.TestCase):
self.assertTrue(all(np.diff(np.abs(b.s.flatten())) > 0))
self.assertTrue(b.z0[0] == a.z0[0])
- def test_interpolate_linear(self):
- a = rf.N(f=[1,2],s=[1+2j, 3+4j],z0=[1,2], f_unit="ghz")
- freq = rf.F.from_f(np.linspace(1,2,3,endpoint=True), unit='GHz')
- b = a.interpolate(freq, kind='linear')
- self.assertFalse(any(np.isnan(b.s)))
- # Test that the endpoints are the equal
- # Middle point can also be calculated in this case
- self.assertTrue(b.s[0] == a.s[0])
- self.assertTrue(b.s[1] == 0.5*(a.s[0] + a.s[1]))
- self.assertTrue(b.s[-1] == a.s[-1])
- # Check Z0 interpolation
- self.assertTrue(b.z0[0] == a.z0[0])
- self.assertTrue(b.z0[1] == 0.5*(a.z0[0] + a.z0[1]))
- self.assertTrue(b.z0[-1] == a.z0[-1])
-
def test_interpolate_freq_cropped(self):
a = rf.N(f=np.arange(20), s=np.arange(20)*(1+1j),z0=1, f_unit="ghz")
freq = rf.F.from_f(np.linspace(1,2,3,endpoint=True), unit='GHz')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test,visa,plot,xlsx,netw,docs]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
charset-normalizer==3.4.1
comm==0.2.2
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
docutils==0.21.2
et_xmlfile==2.0.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
executing==2.2.0
fastjsonschema==2.21.1
fonttools==4.56.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
ipykernel==6.29.5
ipython==8.18.1
ipywidgets==8.1.5
jedi==0.19.2
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterlab_pygments==0.3.0
jupyterlab_widgets==3.0.13
kiwisolver==1.4.7
MarkupSafe==3.0.2
matplotlib==3.9.4
matplotlib-inline==0.1.7
mistune==3.1.3
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nbsphinx==0.9.7
nbval==0.11.0
nest-asyncio==1.6.0
networkx==3.2.1
numpy==2.0.2
openpyxl==3.1.5
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
Pygments==2.19.1
pyparsing==3.2.3
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-ivi==0.14.9
pytz==2025.2
PyVISA==1.14.1
PyVISA-py==0.7.2
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
-e git+https://github.com/scikit-rf/scikit-rf.git@ed9e7ac75b7911763c437e8e0499a7211fbfc418#egg=scikit_rf
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
tinycss2==1.4.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
traitlets==5.14.3
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
wcwidth==0.2.13
webencodings==0.5.1
widgetsnbextension==4.0.13
zipp==3.21.0
| name: scikit-rf
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- comm==0.2.2
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- docutils==0.21.2
- et-xmlfile==2.0.0
- execnet==2.1.1
- executing==2.2.0
- fastjsonschema==2.21.1
- fonttools==4.56.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- ipykernel==6.29.5
- ipython==8.18.1
- ipywidgets==8.1.5
- jedi==0.19.2
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyterlab-pygments==0.3.0
- jupyterlab-widgets==3.0.13
- kiwisolver==1.4.7
- markupsafe==3.0.2
- matplotlib==3.9.4
- matplotlib-inline==0.1.7
- mistune==3.1.3
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nbsphinx==0.9.7
- nbval==0.11.0
- nest-asyncio==1.6.0
- networkx==3.2.1
- numpy==2.0.2
- openpyxl==3.1.5
- pandas==2.2.3
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pygments==2.19.1
- pyparsing==3.2.3
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-ivi==0.14.9
- pytz==2025.2
- pyvisa==1.14.1
- pyvisa-py==0.7.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- scikit-rf==1.4.0
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- tinycss2==1.4.0
- tornado==6.4.2
- traitlets==5.14.3
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- wcwidth==0.2.13
- webencodings==0.5.1
- widgetsnbextension==4.0.13
- zipp==3.21.0
prefix: /opt/conda/envs/scikit-rf
| [
"skrf/tests/test_network.py::NetworkTestCase::test_interpolate_cubic"
] | [] | [
"skrf/tests/test_network.py::NetworkTestCase::test_add",
"skrf/tests/test_network.py::NetworkTestCase::test_auto_use_bandpass",
"skrf/tests/test_network.py::NetworkTestCase::test_autogate",
"skrf/tests/test_network.py::NetworkTestCase::test_bpi",
"skrf/tests/test_network.py::NetworkTestCase::test_cascade",
"skrf/tests/test_network.py::NetworkTestCase::test_cascade2",
"skrf/tests/test_network.py::NetworkTestCase::test_concat_ports",
"skrf/tests/test_network.py::NetworkTestCase::test_connect",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_complex_ports",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_different_s_def",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_drop_ext_attrs",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_multiports",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_no_frequency",
"skrf/tests/test_network.py::NetworkTestCase::test_connect_nport_2port",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_empty",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_fid_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_hfss_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_parameters",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_parameters2",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_pathlib",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_pickle",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_stringio",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_touchstone_special_encoding",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_from_values",
"skrf/tests/test_network.py::NetworkTestCase::test_constructor_invalid_networks",
"skrf/tests/test_network.py::NetworkTestCase::test_conversions",
"skrf/tests/test_network.py::NetworkTestCase::test_dc_extrapolation_dc_sparam",
"skrf/tests/test_network.py::NetworkTestCase::test_de_embed_by_floordiv",
"skrf/tests/test_network.py::NetworkTestCase::test_de_embed_by_inv",
"skrf/tests/test_network.py::NetworkTestCase::test_delay",
"skrf/tests/test_network.py::NetworkTestCase::test_different_ext",
"skrf/tests/test_network.py::NetworkTestCase::test_div",
"skrf/tests/test_network.py::NetworkTestCase::test_equality",
"skrf/tests/test_network.py::NetworkTestCase::test_flip",
"skrf/tests/test_network.py::NetworkTestCase::test_fmt_trace_name",
"skrf/tests/test_network.py::NetworkTestCase::test_gain_circle",
"skrf/tests/test_network.py::NetworkTestCase::test_generate_subnetwork_nounderscore",
"skrf/tests/test_network.py::NetworkTestCase::test_generate_subnetworks_allports",
"skrf/tests/test_network.py::NetworkTestCase::test_generate_subnetworks_nportsabove10",
"skrf/tests/test_network.py::NetworkTestCase::test_generate_subnetworks_nportsbelow10",
"skrf/tests/test_network.py::NetworkTestCase::test_impulse_response_dirac",
"skrf/tests/test_network.py::NetworkTestCase::test_innerconnect_with_T",
"skrf/tests/test_network.py::NetworkTestCase::test_interconnect_complex_ports",
"skrf/tests/test_network.py::NetworkTestCase::test_interpolate_freq_cropped",
"skrf/tests/test_network.py::NetworkTestCase::test_interpolate_linear",
"skrf/tests/test_network.py::NetworkTestCase::test_interpolate_rational",
"skrf/tests/test_network.py::NetworkTestCase::test_interpolate_self",
"skrf/tests/test_network.py::NetworkTestCase::test_invalid_freq",
"skrf/tests/test_network.py::NetworkTestCase::test_is_lossless",
"skrf/tests/test_network.py::NetworkTestCase::test_is_passive",
"skrf/tests/test_network.py::NetworkTestCase::test_is_reciprocal",
"skrf/tests/test_network.py::NetworkTestCase::test_is_symmetric",
"skrf/tests/test_network.py::NetworkTestCase::test_lpi",
"skrf/tests/test_network.py::NetworkTestCase::test_lps",
"skrf/tests/test_network.py::NetworkTestCase::test_max_gain",
"skrf/tests/test_network.py::NetworkTestCase::test_max_stable_gain",
"skrf/tests/test_network.py::NetworkTestCase::test_mul",
"skrf/tests/test_network.py::NetworkTestCase::test_multiport_conversions",
"skrf/tests/test_network.py::NetworkTestCase::test_network_copy",
"skrf/tests/test_network.py::NetworkTestCase::test_network_empty_frequency_range",
"skrf/tests/test_network.py::NetworkTestCase::test_network_from_z_or_y",
"skrf/tests/test_network.py::NetworkTestCase::test_network_sequence_frequency_with_f_unit",
"skrf/tests/test_network.py::NetworkTestCase::test_nf_circle",
"skrf/tests/test_network.py::NetworkTestCase::test_noise",
"skrf/tests/test_network.py::NetworkTestCase::test_noise_dc_extrapolation",
"skrf/tests/test_network.py::NetworkTestCase::test_noise_deembed",
"skrf/tests/test_network.py::NetworkTestCase::test_noise_interpolation",
"skrf/tests/test_network.py::NetworkTestCase::test_open_saved_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_parallelconnect",
"skrf/tests/test_network.py::NetworkTestCase::test_parallelconnect_inner",
"skrf/tests/test_network.py::NetworkTestCase::test_parallelconnect_mismatch",
"skrf/tests/test_network.py::NetworkTestCase::test_parallelconnect_open",
"skrf/tests/test_network.py::NetworkTestCase::test_pickling",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_one_port_db",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_one_port_deg",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_one_port_smith",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_two_port_db",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_two_port_deg",
"skrf/tests/test_network.py::NetworkTestCase::test_plot_two_port_smith",
"skrf/tests/test_network.py::NetworkTestCase::test_renumber",
"skrf/tests/test_network.py::NetworkTestCase::test_s_active",
"skrf/tests/test_network.py::NetworkTestCase::test_s_def_setters",
"skrf/tests/test_network.py::NetworkTestCase::test_se2gmm",
"skrf/tests/test_network.py::NetworkTestCase::test_se2gmm2se",
"skrf/tests/test_network.py::NetworkTestCase::test_se2gmm_3port",
"skrf/tests/test_network.py::NetworkTestCase::test_se2gmm_renorm",
"skrf/tests/test_network.py::NetworkTestCase::test_setters",
"skrf/tests/test_network.py::NetworkTestCase::test_slicer",
"skrf/tests/test_network.py::NetworkTestCase::test_spar_interpolation",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_conversion_vs_sdefinition",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_conversion_with_complex_char_impedance",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_from_hfss_with_power_wave",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_renorm_different_z0",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_renorm_s2s",
"skrf/tests/test_network.py::NetworkTestCase::test_sparam_renormalize",
"skrf/tests/test_network.py::NetworkTestCase::test_stability",
"skrf/tests/test_network.py::NetworkTestCase::test_stability_circle",
"skrf/tests/test_network.py::NetworkTestCase::test_stitch",
"skrf/tests/test_network.py::NetworkTestCase::test_sub",
"skrf/tests/test_network.py::NetworkTestCase::test_subnetwork",
"skrf/tests/test_network.py::NetworkTestCase::test_subnetwork_port_names",
"skrf/tests/test_network.py::NetworkTestCase::test_time_gate",
"skrf/tests/test_network.py::NetworkTestCase::test_time_gate_custom_window",
"skrf/tests/test_network.py::NetworkTestCase::test_time_gate_raises",
"skrf/tests/test_network.py::NetworkTestCase::test_time_transform",
"skrf/tests/test_network.py::NetworkTestCase::test_time_transform_multiport",
"skrf/tests/test_network.py::NetworkTestCase::test_time_transform_nonlinear_f",
"skrf/tests/test_network.py::NetworkTestCase::test_time_transform_squeeze",
"skrf/tests/test_network.py::NetworkTestCase::test_time_transform_v2",
"skrf/tests/test_network.py::NetworkTestCase::test_timedomain",
"skrf/tests/test_network.py::NetworkTestCase::test_two_port_reflect",
"skrf/tests/test_network.py::NetworkTestCase::test_twport_to_nport",
"skrf/tests/test_network.py::NetworkTestCase::test_unilateral_gain",
"skrf/tests/test_network.py::NetworkTestCase::test_unknown_s_def",
"skrf/tests/test_network.py::NetworkTestCase::test_vswr_active",
"skrf/tests/test_network.py::NetworkTestCase::test_write_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_write_touchstone_noisy",
"skrf/tests/test_network.py::NetworkTestCase::test_y_z_compatability",
"skrf/tests/test_network.py::NetworkTestCase::test_yz",
"skrf/tests/test_network.py::NetworkTestCase::test_z0_assign",
"skrf/tests/test_network.py::NetworkTestCase::test_z0_matrix",
"skrf/tests/test_network.py::NetworkTestCase::test_z0_pure_imaginary",
"skrf/tests/test_network.py::NetworkTestCase::test_z0_scalar",
"skrf/tests/test_network.py::NetworkTestCase::test_z0_vector",
"skrf/tests/test_network.py::NetworkTestCase::test_zipped_touchstone",
"skrf/tests/test_network.py::NetworkTestCase::test_zy_singularities"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,144 | 124 | [
"skrf/network.py"
] |
|
movingpandas__movingpandas-428 | 843ef836d789074849c5e7f5a297fb747aa38801 | 2024-11-05 18:50:11 | 45c9a3cd5040af17e4f361794dd8bfb0e44b1951 | diff --git a/movingpandas/trajectory.py b/movingpandas/trajectory.py
index 9b23cbf..48d01e4 100644
--- a/movingpandas/trajectory.py
+++ b/movingpandas/trajectory.py
@@ -1369,7 +1369,8 @@ class Trajectory:
except ValueError as e:
raise e
# set the distance in the first row to zero
- temp_df.at[self.get_start_time(), name] = 0
+ t0 = self.df.index.min().to_datetime64()
+ temp_df.at[t0, name] = 0
temp_df = temp_df.drop(columns=["prev_pt"])
return temp_df
@@ -1383,7 +1384,8 @@ class Trajectory:
except ValueError as e:
raise e
# set the speed in the first row to the speed of the second row
- temp_df.at[self.get_start_time(), name] = temp_df.iloc[1][name]
+ t0 = self.df.index.min().to_datetime64()
+ temp_df.at[t0, name] = temp_df.iloc[1][name]
temp_df = temp_df.drop(columns=["prev_pt", "delta_t"])
return temp_df
@@ -1396,7 +1398,8 @@ class Trajectory:
)
# set the acceleration in the first row to the acceleration of the
# second row
- temp_df.at[self.get_start_time(), name] = temp_df.iloc[1][name]
+ t0 = self.df.index.min().to_datetime64()
+ temp_df.at[t0, name] = temp_df.iloc[1][name]
return temp_df.drop(columns=["speed_temp"])
def intersects(self, polygon):
| set_speed inaccurate with floating point (nanosecond) datetime values results in unmitigatable error
```python
# Your code here
```
#### Problem description
Trajectories that contain a datetime that has floating point seconds value results in the dataframe of the trajectory getting one value withput cordinates added when calculating speed .

This results in

This is due to the code in trajectory.py line 1328
```python
temp_df.at[self.get_start_time(), name] = temp_df.iloc[1][name]
```
As ``` self.get_start_time()``` reports the seconds value in integers:
```
IPdb[16]: self.get_start_time()
Out [16]: datetime.datetime(2024, 10, 25, 7, 39, 31, 250000)
```
However, in the assignment in line 1328 the
```temp_df.at[self.get_start_time(), name]" ``` does not find the correct value in the index and thus defaults to adding another row to the dataframe (see screenshot up there)
Visible when just calling the "startime" of the dataframe:
```
temp_df.index[0]
Out [19]: Timestamp('2024-10-25 07:39:31.250000477')
```
It seems that
```return self.df.index.min().to_pydatetime()``` is swallowing the last values due to python datetime only supporting microseconds.
#### Output of ``movingpandas.show_versions()``
<details>
MovingPandas 0.18.1
SYSTEM INFO
-----------
python : 3.10.14 | packaged by Anaconda, Inc. | (main, May 6 2024, 19:44:50) [MSC v.1916 64 bit (AMD64)]
executable : C:\Users\Riedel\.conda\envs\tracjectory\python.exe
machine : Windows-10-10.0.19045-SP0
GEOS, GDAL, PROJ INFO
---------------------
GEOS : None
GEOS lib : None
GDAL : 3.6.2
GDAL data dir: None
PROJ : 9.3.1
PROJ data dir: C:\Users\Riedel\.conda\envs\tracjectory\Library\share\proj
PYTHON DEPENDENCIES
-------------------
geopandas : 0.14.2
pandas : 2.2.2
fiona : 1.9.5
numpy : 1.26.4
shapely : 2.0.1
rtree : 1.0.1
pyproj : 3.6.1
matplotlib : 3.7.1
mapclassify: 2.5.0
geopy : 2.4.1
holoviews : 1.19.1
hvplot : 0.10.0
geoviews : 1.12.0
stonesoup : None
</details>
| movingpandas/movingpandas | diff --git a/movingpandas/tests/test_trajectory.py b/movingpandas/tests/test_trajectory.py
index b19c91c..c818ee1 100644
--- a/movingpandas/tests/test_trajectory.py
+++ b/movingpandas/tests/test_trajectory.py
@@ -477,6 +477,19 @@ class TestTrajectory:
traj.add_speed()
traj.add_speed(overwrite=True)
+ def test_add_speed_with_nanoseconds(self):
+ import numpy as np
+
+ start_time = pd.Timestamp.now() + pd.Timedelta(nanoseconds=10)
+ timedeltas = np.arange(10) * pd.Timedelta(seconds=0.2)
+ timestamps = start_time + timedeltas
+ df = pd.DataFrame({"datetime": timestamps})
+ df["x"] = np.arange(0, 10) * 100
+ df["y"] = np.arange(0, 10) * 100
+ traj = Trajectory(df, traj_id=1, t="datetime", y="y", x="x", crs="epsg:32632")
+ traj.add_speed()
+ assert len(traj.df) == 10
+
def test_add_acceleration(self):
traj = make_traj([Node(0, 0), Node(6, 0, second=1), Node(18, 0, second=2)])
traj.add_acceleration()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.20 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "python setup.py develop",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | affine @ file:///home/conda/feedstock_root/build_artifacts/affine_1733762038348/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1741918516150/work
autocommand==2.2.2
backports.tarfile==1.2.0
black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1742502760723/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_bleach_1737382993/work
bokeh @ file:///home/conda/feedstock_root/build_artifacts/bokeh_1741848529421/work
branca @ file:///home/conda/feedstock_root/build_artifacts/branca_1734433375112/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work
Cartopy @ file:///home/conda/feedstock_root/build_artifacts/cartopy_1728342231186/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1725560520483/work
cfgv @ file:///home/conda/feedstock_root/build_artifacts/cfgv_1734267080977/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1725400453617/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work
click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work
click-plugins @ file:///home/conda/feedstock_root/build_artifacts/click-plugins_1733731077999/work
cligj @ file:///home/conda/feedstock_root/build_artifacts/cligj_1733749956636/work
codecov @ file:///home/conda/feedstock_root/build_artifacts/codecov_1734975286850/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work
colorcet @ file:///home/conda/feedstock_root/build_artifacts/colorcet_1734007314889/work
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1731428322366/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1743381215370/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1733332471406/work
Cython @ file:///home/conda/feedstock_root/build_artifacts/cython_1739227941089/work
datashader @ file:///home/conda/feedstock_root/build_artifacts/datashader_1738222726141/work
distlib @ file:///home/conda/feedstock_root/build_artifacts/distlib_1733238395481/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1741969488311/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1739898391164/work
folium @ file:///home/conda/feedstock_root/build_artifacts/folium_1740766619747/work
fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1738940303262/work
geographiclib @ file:///home/conda/feedstock_root/build_artifacts/geographiclib_1734342349249/work
geopandas @ file:///home/conda/feedstock_root/build_artifacts/geopandas_1734346029138/work
geopy @ file:///home/conda/feedstock_root/build_artifacts/geopy_1734341931581/work
geoviews @ file:///home/conda/feedstock_root/build_artifacts/geoviews-core_1734509456819/work
h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1738578511449/work
holoviews @ file:///home/conda/feedstock_root/build_artifacts/holoviews_1741872276401/work
hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1737618293087/work
hvplot @ file:///home/conda/feedstock_root/build_artifacts/hvplot_1734388686140/work
hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1737618333194/work
identify @ file:///home/conda/feedstock_root/build_artifacts/identify_1741502659866/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work
importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
inflect==7.3.1
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work
jaraco.collections==5.1.0
jaraco.context==5.3.0
jaraco.functools==4.0.1
jaraco.text==3.12.1
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1733736026804/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1725459213453/work
linkify-it-py @ file:///home/conda/feedstock_root/build_artifacts/linkify-it-py_1733781180837/work
llvmlite==0.44.0
mapclassify @ file:///home/conda/feedstock_root/build_artifacts/mapclassify_1733731066416/work
Markdown @ file:///home/conda/feedstock_root/build_artifacts/markdown_1710435156458/work
markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1733250460757/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work
matplotlib==3.10.1
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1733216466933/work
mdit-py-plugins @ file:///home/conda/feedstock_root/build_artifacts/mdit-py-plugins_1733854715505/work
mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1733255585584/work
mergedeep @ file:///home/conda/feedstock_root/build_artifacts/mergedeep_1734156985434/work
more-itertools==10.3.0
-e git+https://github.com/movingpandas/movingpandas.git@843ef836d789074849c5e7f5a297fb747aa38801#egg=movingpandas
multipledispatch @ file:///home/conda/feedstock_root/build_artifacts/multipledispatch_1721907546485/work
munkres==1.1.4
mypy_extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1733230897951/work
narwhals @ file:///home/conda/feedstock_root/build_artifacts/narwhals_1742841036354/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1733253079498/work
networkx @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_networkx_1731521053/work
nodeenv @ file:///home/conda/feedstock_root/build_artifacts/nodeenv_1734112117269/work
numba @ file:///home/conda/feedstock_root/build_artifacts/numba_1739224673889/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1730588026426/work/dist/numpy-2.1.3-cp310-cp310-linux_x86_64.whl#sha256=589b636d43e6856ab6eab6a1a707eef18871d272782f92400ea890827e0bf64b
ordered-set @ file:///home/conda/feedstock_root/build_artifacts/ordered-set_1733927903326/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1726878398774/work
panel @ file:///home/conda/feedstock_root/build_artifacts/panel_1743356992402/work
param @ file:///home/conda/feedstock_root/build_artifacts/param_1734441041763/work
pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1733233363808/work
patsy @ file:///home/conda/feedstock_root/build_artifacts/patsy_1733792384640/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1735929693232/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_platformdirs_1742485085/work
plotly @ file:///home/conda/feedstock_root/build_artifacts/plotly_1742240435426/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work
pre_commit @ file:///home/conda/feedstock_root/build_artifacts/pre-commit_1742475552107/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1733216196861/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pycparser_1733195786/work
pyct @ file:///home/conda/feedstock_root/build_artifacts/pyct_1734342495516/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1733216066937/work
pymap3d @ file:///home/conda/feedstock_root/build_artifacts/pymap3d_1735112850856/work
pyogrio @ file:///home/conda/feedstock_root/build_artifacts/pyogrio_1732013380254/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1743089729650/work
pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1742323235700/work
pyshp @ file:///home/conda/feedstock_root/build_artifacts/pyshp_1733821528126/work
PySide6==6.8.3
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work
pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1733223023082/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1706886791323/work
pyviz_comms @ file:///home/conda/feedstock_root/build_artifacts/pyviz_comms_1736890319493/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work
rasterio @ file:///home/conda/feedstock_root/build_artifacts/rasterio_1742428573226/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work
rtree @ file:///home/conda/feedstock_root/build_artifacts/rtree_1741378561624/work
ruamel.yaml @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml_1736248022260/work
ruamel.yaml.clib @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml.clib_1728724455198/work
scikit-learn @ file:///home/conda/feedstock_root/build_artifacts/scikit-learn_1736496756180/work/dist/scikit_learn-1.6.1-cp310-cp310-linux_x86_64.whl#sha256=8b3481924bda36bf9a85c5f500f48e43e1178ead014b2d2ecf10f7b9b49935b4
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy-split_1739790642651/work/dist/scipy-1.15.2-cp310-cp310-linux_x86_64.whl#sha256=9e52bad6c3294d1a5b04a13632118ca2157130603c6c018c2d710162b223b27e
seaborn @ file:///home/conda/feedstock_root/build_artifacts/seaborn-split_1733730015268/work
shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1741166961581/work
shiboken6==6.8.3
six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work
snuggs @ file:///home/conda/feedstock_root/build_artifacts/snuggs_1733818638588/work
statsmodels @ file:///home/conda/feedstock_root/build_artifacts/statsmodels_1727986707121/work
stonesoup @ file:///home/conda/feedstock_root/build_artifacts/stonesoup_1741796333669/work
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1741878222898/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work
toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1733736030883/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1732615898999/work
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1735661334605/work
typeguard==4.3.0
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1742745135198/work
uc-micro-py @ file:///home/conda/feedstock_root/build_artifacts/uc-micro-py_1733784165198/work
ukkonen @ file:///home/conda/feedstock_root/build_artifacts/ukkonen_1725784026316/work
unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1736692496989/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1734859416348/work
utm @ file:///home/conda/feedstock_root/build_artifacts/utm_1621882749246/work
virtualenv @ file:///home/conda/feedstock_root/build_artifacts/virtualenv_1741337798015/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1733236011802/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1742448343846/work
xyzservices @ file:///home/conda/feedstock_root/build_artifacts/xyzservices_1737234886776/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work
zstandard==0.23.0
| name: movingpandas
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- affine=2.4.0=pyhd8ed1ab_1
- alsa-lib=1.2.13=hb9d3cd8_0
- aom=3.9.1=hac33072_0
- attrs=25.3.0=pyh71513ae_0
- black=25.1.0=pyha5154f8_0
- bleach=6.2.0=pyh29332c3_4
- blosc=1.21.6=he440d0b_1
- bokeh=3.7.0=pyhd8ed1ab_0
- branca=0.8.1=pyhd8ed1ab_0
- brotli=1.1.0=hb9d3cd8_2
- brotli-bin=1.1.0=hb9d3cd8_2
- brotli-python=1.1.0=py310hf71b8c6_2
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cairo=1.18.4=h3394656_0
- cartopy=0.24.0=py310h5eaa309_0
- certifi=2025.1.31=pyhd8ed1ab_0
- cffi=1.17.1=py310h8deb56e_0
- cfgv=3.3.1=pyhd8ed1ab_1
- cftime=1.6.4=py310hf462985_1
- charset-normalizer=3.4.1=pyhd8ed1ab_0
- click=8.1.8=pyh707e725_0
- click-plugins=1.1.1=pyhd8ed1ab_1
- cligj=0.7.2=pyhd8ed1ab_2
- codecov=2.1.13=pyhd8ed1ab_1
- colorama=0.4.6=pyhd8ed1ab_1
- colorcet=3.1.0=pyhd8ed1ab_1
- contourpy=1.3.1=py310h3788b33_0
- coverage=7.8.0=py310h89163eb_0
- cycler=0.12.1=pyhd8ed1ab_1
- cyrus-sasl=2.1.27=h54b06d7_7
- cython=3.0.12=py310had8cdd9_0
- datashader=0.17.0=pyhd8ed1ab_0
- dav1d=1.2.1=hd590300_0
- dbus=1.13.6=h5008d03_3
- distlib=0.3.9=pyhd8ed1ab_1
- double-conversion=3.3.1=h5888daf_0
- exceptiongroup=1.2.2=pyhd8ed1ab_1
- expat=2.6.4=h5888daf_0
- filelock=3.18.0=pyhd8ed1ab_0
- flake8=7.1.2=pyhd8ed1ab_0
- folium=0.19.5=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.15.0=h7e30c49_1
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fonttools=4.56.0=py310h89163eb_0
- freetype=2.13.3=h48d6fc4_0
- freexl=2.0.0=h9dce30a_2
- geographiclib=2.0=pyhd8ed1ab_1
- geopandas=1.0.1=pyhd8ed1ab_3
- geopandas-base=1.0.1=pyha770c72_3
- geopy=2.4.1=pyhd8ed1ab_2
- geos=3.13.1=h97f6797_0
- geotiff=1.7.4=h239500f_2
- geoviews=1.14.0=hd8ed1ab_0
- geoviews-core=1.14.0=pyha770c72_0
- giflib=5.2.2=hd590300_0
- graphite2=1.3.13=h59595ed_1003
- gtest=1.16.0=h84d6215_0
- h2=4.2.0=pyhd8ed1ab_0
- harfbuzz=11.0.0=h76408a6_0
- hdf4=4.2.15=h2a13503_7
- hdf5=1.14.4=nompi_h2d575fe_105
- holoviews=1.20.2=pyhd8ed1ab_0
- hpack=4.1.0=pyhd8ed1ab_0
- hvplot=0.11.2=pyhd8ed1ab_0
- hyperframe=6.1.0=pyhd8ed1ab_0
- icu=75.1=he02047a_0
- identify=2.6.9=pyhd8ed1ab_0
- idna=3.10=pyhd8ed1ab_1
- importlib-metadata=8.6.1=pyha770c72_0
- iniconfig=2.0.0=pyhd8ed1ab_1
- jinja2=3.1.6=pyhd8ed1ab_0
- joblib=1.4.2=pyhd8ed1ab_1
- json-c=0.18=h6688a6e_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.7=py310h3788b33_0
- krb5=1.21.3=h659f571_0
- lcms2=2.17=h717163a_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- libaec=1.1.3=h59595ed_0
- libarchive=3.7.7=h4585015_3
- libavif16=1.2.1=hbb36593_2
- libblas=3.9.0=31_h59b9bed_openblas
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcblas=3.9.0=31_he106b2a_openblas
- libclang-cpp20.1=20.1.1=default_hb5137d0_0
- libclang13=20.1.1=default_h9c6a7e4_0
- libcups=2.3.3=h4637d8d_4
- libcurl=8.12.1=h332b0f4_0
- libde265=1.0.15=h00ab1b0_0
- libdeflate=1.23=h4ddbbb0_0
- libdrm=2.4.124=hb9d3cd8_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libegl=1.7.0=ha4b6fd6_2
- libev=4.33=hd590300_2
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgdal-core=3.10.2=hae73b24_5
- libgfortran=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libgl=1.7.0=ha4b6fd6_2
- libglib=2.84.0=h2ff4ddf_0
- libglvnd=1.7.0=ha4b6fd6_2
- libglx=1.7.0=ha4b6fd6_2
- libgomp=14.2.0=h767d61c_2
- libheif=1.19.7=gpl_hc18d805_100
- libiconv=1.18=h4ce23a2_1
- libjpeg-turbo=3.0.0=hd590300_1
- libkml=1.3.0=hf539b9f_1021
- liblapack=3.9.0=31_h7ac8fdf_openblas
- libllvm20=20.1.1=ha7bfdaf_0
- liblzma=5.6.4=hb9d3cd8_0
- libnetcdf=4.9.2=nompi_h5ddbaa4_116
- libnghttp2=1.64.0=h161d5f1_0
- libnsl=2.0.1=hd590300_0
- libntlm=1.8=hb9d3cd8_0
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopengl=1.7.0=ha4b6fd6_2
- libpciaccess=0.18=hd590300_0
- libpng=1.6.47=h943b412_0
- libpq=17.4=h27ae623_0
- librttopo=1.1.0=hd718a1a_18
- libspatialindex=2.1.0=he57a185_0
- libspatialite=5.1.0=he17ca71_14
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.1=hf672d98_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.7.0=hd9ff511_3
- libuuid=2.38.1=h0b41bf4_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.17.0=h8a09558_0
- libxcrypt=4.4.36=hd590300_1
- libxkbcommon=1.8.1=hc4a0caf_0
- libxml2=2.13.7=h8d12d68_0
- libxslt=1.1.39=h76b75d6_0
- libzip=1.11.2=h6991a6a_0
- libzlib=1.3.1=hb9d3cd8_2
- linkify-it-py=2.0.3=pyhd8ed1ab_1
- llvmlite=0.44.0=py310h1a6248f_1
- lz4-c=1.10.0=h5888daf_1
- lzo=2.10=hd590300_1001
- mapclassify=2.8.1=pyhd8ed1ab_1
- markdown=3.6=pyhd8ed1ab_0
- markdown-it-py=3.0.0=pyhd8ed1ab_1
- markupsafe=3.0.2=py310h89163eb_1
- matplotlib=3.10.1=py310hff52083_0
- matplotlib-base=3.10.1=py310h68603db_0
- mccabe=0.7.0=pyhd8ed1ab_1
- mdit-py-plugins=0.4.2=pyhd8ed1ab_1
- mdurl=0.1.2=pyhd8ed1ab_1
- mergedeep=1.3.4=pyhd8ed1ab_1
- minizip=4.0.7=h05a5f5f_3
- multipledispatch=0.6.0=pyhd8ed1ab_1
- munkres=1.1.4=pyh9f0ad1d_0
- mypy_extensions=1.0.0=pyha770c72_1
- mysql-common=9.0.1=h266115a_5
- mysql-libs=9.0.1=he0572af_5
- narwhals=1.32.0=pyhd8ed1ab_0
- ncurses=6.5=h2d0b736_3
- netcdf4=1.7.2=nompi_py310h5146f0f_101
- networkx=3.4.2=pyh267e887_2
- nodeenv=1.9.1=pyhd8ed1ab_1
- numba=0.61.0=py310h699fe88_1
- numpy=2.1.3=py310hd6e36ab_0
- openjpeg=2.5.3=h5fbd93e_0
- openldap=2.6.9=he970967_0
- openssl=3.4.1=h7b32b05_0
- ordered-set=4.1.0=pyhd8ed1ab_1
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.2.3=py310h5eaa309_1
- panel=1.6.2=pyhd8ed1ab_0
- param=2.2.0=pyhd8ed1ab_0
- pathspec=0.12.1=pyhd8ed1ab_1
- patsy=1.0.1=pyhd8ed1ab_1
- pcre2=10.44=hba22ea6_2
- pillow=11.1.0=py310h7e6dc6c_0
- pip=25.0.1=pyh8b19718_0
- pixman=0.44.2=h29eaf8c_0
- platformdirs=4.3.7=pyh29332c3_0
- plotly=6.0.1=pyhd8ed1ab_0
- pluggy=1.5.0=pyhd8ed1ab_1
- pre-commit=4.2.0=pyha770c72_0
- proj=9.6.0=h0054346_0
- pthread-stubs=0.4=hb9d3cd8_1002
- pycodestyle=2.12.1=pyhd8ed1ab_1
- pycparser=2.22=pyh29332c3_1
- pyct=0.5.0=pyhd8ed1ab_1
- pyflakes=3.2.0=pyhd8ed1ab_1
- pymap3d=3.1.0=pyhd8ed1ab_1
- pyogrio=0.10.0=py310h0aed7a2_1
- pyparsing=3.2.3=pyhd8ed1ab_1
- pyproj=3.7.1=py310h71d0299_1
- pyshp=2.3.1=pyhd8ed1ab_1
- pyside6=6.8.3=py310hfd10a26_0
- pysocks=1.7.1=pyha55dd90_7
- pytest=8.3.5=pyhd8ed1ab_0
- pytest-cov=6.0.0=pyhd8ed1ab_1
- python=3.10.16=he725a3c_1_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.10=5_cp310
- pytz=2024.1=pyhd8ed1ab_0
- pyviz_comms=3.0.4=pyhd8ed1ab_1
- pyyaml=6.0.2=py310h89163eb_2
- qhull=2020.2=h434a139_5
- qt6-main=6.8.3=h6441bc3_1
- rasterio=1.4.3=py310hb0078ae_1
- rav1e=0.6.6=he8a937b_2
- readline=8.2=h8c095d6_2
- requests=2.32.3=pyhd8ed1ab_1
- rtree=1.4.0=pyh11ca60a_1
- ruamel.yaml=0.18.10=py310ha75aee5_0
- ruamel.yaml.clib=0.2.8=py310ha75aee5_1
- scikit-learn=1.6.1=py310h27f47ee_0
- scipy=1.15.2=py310h1d65ade_0
- seaborn=0.13.2=hd8ed1ab_3
- seaborn-base=0.13.2=pyhd8ed1ab_3
- setuptools=75.8.2=pyhff2d567_0
- shapely=2.0.7=py310h247727d_1
- six=1.17.0=pyhd8ed1ab_0
- snappy=1.2.1=h8bd8927_1
- snuggs=1.4.7=pyhd8ed1ab_2
- sqlite=3.49.1=h9eae976_2
- statsmodels=0.14.4=py310hf462985_0
- stonesoup=1.6=pyhd8ed1ab_0
- svt-av1=3.0.2=h5888daf_0
- threadpoolctl=3.6.0=pyhecae5ae_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_1
- tomli=2.2.1=pyhd8ed1ab_1
- toolz=1.0.0=pyhd8ed1ab_1
- tornado=6.4.2=py310ha75aee5_0
- tqdm=4.67.1=pyhd8ed1ab_1
- typing_extensions=4.13.0=pyh29332c3_1
- tzdata=2025b=h78e105d_0
- uc-micro-py=1.0.3=pyhd8ed1ab_1
- ukkonen=1.0.1=py310h3788b33_5
- unicodedata2=16.0.0=py310ha75aee5_0
- uriparser=0.9.8=hac33072_0
- urllib3=2.3.0=pyhd8ed1ab_0
- utm=0.7.0=pyhd8ed1ab_0
- virtualenv=20.29.3=pyhd8ed1ab_0
- wayland=1.23.1=h3e06ad9_0
- webencodings=0.5.1=pyhd8ed1ab_3
- wheel=0.45.1=pyhd8ed1ab_1
- x265=3.5=h924138e_3
- xarray=2025.3.0=pyhd8ed1ab_0
- xcb-util=0.4.1=hb711507_2
- xcb-util-cursor=0.1.5=hb9d3cd8_0
- xcb-util-image=0.4.0=hb711507_2
- xcb-util-keysyms=0.4.1=hb711507_0
- xcb-util-renderutil=0.3.10=hb711507_0
- xcb-util-wm=0.4.2=hb711507_0
- xerces-c=3.2.5=h988505b_2
- xkeyboard-config=2.43=hb9d3cd8_0
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.12=h4f16b4b_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxcomposite=0.4.6=hb9d3cd8_2
- xorg-libxcursor=1.2.3=hb9d3cd8_0
- xorg-libxdamage=1.1.6=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xorg-libxext=1.3.6=hb9d3cd8_0
- xorg-libxfixes=6.0.1=hb9d3cd8_0
- xorg-libxi=1.8.2=hb9d3cd8_0
- xorg-libxrandr=1.5.4=hb9d3cd8_0
- xorg-libxrender=0.9.12=hb9d3cd8_0
- xorg-libxtst=1.2.5=hb9d3cd8_3
- xorg-libxxf86vm=1.1.6=hb9d3cd8_0
- xyzservices=2025.1.0=pyhd8ed1ab_0
- yaml=0.2.5=h7f98852_2
- zipp=3.21.0=pyhd8ed1ab_1
- zlib=1.3.1=hb9d3cd8_2
- zstandard=0.23.0=py310ha75aee5_1
- zstd=1.5.7=hb8e6e7a_2
prefix: /opt/conda/envs/movingpandas
| [
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_nanoseconds"
] | [] | [
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_timezone_info_drop",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_str",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_size",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_endlocation",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_write_linestring_wkt",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_write_linestring_m_wkt_with_unix_time",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_at_existing_timestamp",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_postion_at_out_of_time_range",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_with_invalid_method",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_interpolated_position_at_existing_timestamp",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_of_nearest_timestamp_1",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_of_nearest_timestamp_2",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_interpolated_at_timestamp_1",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_position_interpolated_at_timestamp_2",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_segment_between_existing_timestamps",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_segment_between_new_timestamps",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_segment_between_start_and_end",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_linestring_between_interpolate",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_linestring_between_interpolate_existing_timestamps",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_linestring_between_interpolate_ValueError",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_linestring_between_within",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_traj_id",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_traj_id_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_traj_id_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_with_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_doesnt_change_existing_direction",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_only_adds_direction_col_and_doesnt_otherwise_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_direction_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_angular_difference",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_angular_difference_with_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_angular_difference_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_angular_difference_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_ag_doesnt_change_existing_ag",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_ag_only_adds_ag_column_and_doesnt_otherwise_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_ag_keeps_existing_direction",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_units_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_only_distance_units_and_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_extra_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_doesnt_change_existing_speed",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_only_adds_speed_column_and_doesnt_otherwise_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_with_units_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_speed_latlon_numerical_issues",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_and_time_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_and_both_time_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_units_with_name_no_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_and_time_units_with_name_no_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_and_both_time_units_with_name_no_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_distance_and_both_time_units_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_wrong_distance_units_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_wrong_time_units_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_wrong_time2_units_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_with_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_doesnt_change_existing_acceleration",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_accel_only_adds_accel_column_and_doesnt_otherwise_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_acceleration_keeps_existing_speed",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_with_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_with_extra_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_without_crs_with_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_with_name",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_second_distance_with_name_and_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_doesnt_change_existing_distance",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_only_adds_distance_column_and_doesnt_otherwise_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_latlon",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_latlon_with_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_latlon_with_wrong_units_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_latlon_with_wrong_time_units_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_distance_latlon_numerical_issues",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_timedelta",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_timedelta_overwrite_raises_error",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_add_timedelta_can_overwrite",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_bbox",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_length",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_length_spherical",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_length_spherical_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_length_euclidiean",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_length_euclidiean_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_direction",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_sampling_interval",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_get_sampling_interval_irregular",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_offset_seconds",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_offset_minutes",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_nonchronological_input",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_plot_exists",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_explore_exists",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hvplot_exists",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hvplot_with_speed_exists",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hvplot_exists_without_crs",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_tolinestring_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_getlength_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_str_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_plot_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_linestringbetween_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_getpositionat_does_not_alter_df",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_support_for_subclasses_of_point",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_support_for_other_geometry_column_names",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_support_for_other_time_column_names",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_point_gdf",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_point_gdf_return_tz",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_point_gdf_dont_return_tz",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_line_gdf",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_traj_gdf",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_mf_json_tostr",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_to_mf_json",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_error_due_to_wrong_gdf_index",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_mcp_poly",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_mcp_line",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_distance",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_distance_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_distance_warning",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hausdorff_distance",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hausdorff_distance_units",
"movingpandas/tests/test_trajectory.py::TestTrajectory::test_hausdorff_distance_warning"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,162 | 406 | [
"movingpandas/trajectory.py"
] |
|
reagento__dishka-297 | 8fd67f43bb1662e802415177108c43f4e578470f | 2024-11-05 21:33:36 | 88ecccd133906090ff044adf801f7e028205cccb | diff --git a/src/dishka/registry.py b/src/dishka/registry.py
index c7d67b5..fd58d3c 100644
--- a/src/dishka/registry.py
+++ b/src/dishka/registry.py
@@ -14,13 +14,16 @@ from .factory_compiler import compile_factory
class Registry:
- __slots__ = ("scope", "factories", "compiled", "compiled_async")
+ __slots__ = (
+ "scope", "factories", "compiled", "compiled_async", "has_fallback",
+ )
- def __init__(self, scope: BaseScope):
+ def __init__(self, scope: BaseScope, *, has_fallback: bool) -> None:
self.scope = scope
self.factories: dict[DependencyKey, Factory] = {}
self.compiled: dict[DependencyKey, Callable[..., Any]] = {}
self.compiled_async: dict[DependencyKey, Callable[..., Any]] = {}
+ self.has_fallback = has_fallback
def add_factory(
self,
@@ -64,7 +67,7 @@ class Registry:
origin = get_origin(dependency.type_hint)
if not origin:
return None
- if origin is type:
+ if origin is type and self.has_fallback:
return self._get_type_var_factory(dependency)
origin_key = DependencyKey(origin, dependency.component)
@@ -112,10 +115,10 @@ class Registry:
hint = source_dependency.type_hint
if isinstance(hint, TypeVar):
hint = params_replacement[hint]
- elif get_origin(hint):
+ elif get_origin(hint) and (type_vars:=get_type_vars(hint)):
hint = hint[tuple(
params_replacement[param]
- for param in get_type_vars(hint)
+ for param in type_vars
)]
new_dependencies.append(DependencyKey(
hint, source_dependency.component,
@@ -125,10 +128,10 @@ class Registry:
hint = source_dependency.type_hint
if isinstance(hint, TypeVar):
hint = params_replacement[hint]
- elif get_origin(hint):
+ elif get_origin(hint) and (type_vars:=get_type_vars(hint)):
hint = hint[tuple(
params_replacement[param]
- for param in get_type_vars(hint)
+ for param in type_vars
)]
new_kw_dependencies[name] = DependencyKey(
hint, source_dependency.component,
diff --git a/src/dishka/registry_builder.py b/src/dishka/registry_builder.py
index ccc7526..a314207 100644
--- a/src/dishka/registry_builder.py
+++ b/src/dishka/registry_builder.py
@@ -170,8 +170,9 @@ class RegistryBuilder:
self.aliases[provides] = alias
def _init_registries(self) -> None:
+ has_fallback = True
for scope in self.scopes:
- registry = Registry(scope)
+ registry = Registry(scope, has_fallback=has_fallback)
context_var = ContextVariable(
provides=DependencyKey(self.container_type, DEFAULT_COMPONENT),
scope=scope,
@@ -180,6 +181,7 @@ class RegistryBuilder:
for component in self.components:
registry.add_factory(context_var.as_factory(component))
self.registries[scope] = registry
+ has_fallback = False
def _process_factory(
self, provider: BaseProvider, factory: Factory,
| Abstract type provider not working.
MRE:
dishka==1.4.0
```py
from dishka import Provider, Scope, provide, make_container
from abc import ABC, abstractmethod
class AbstractFoo(ABC):
@abstractmethod
def bar(self) -> None: ...
class SolidFoo(AbstractFoo):
def bar(self) -> None:
print('hello')
class MREProvider(Provider):
@provide(scope=Scope.APP)
def get_foo_class(self) -> type[AbstractFoo]:
return SolidFoo
@provide(scope=Scope.REQUEST)
def provide_foo(self, foo_cls: type[AbstractFoo]) -> AbstractFoo:
return foo_cls()
def main() -> None:
c1 = make_container(MREProvider())
with c1(scope=Scope.REQUEST) as c2:
c2.get(AbstractFoo)
main()
```
causes:
```
TypeError: Can't instantiate abstract class AbstractFoo without an implementation for abstract method 'bar'
```
However, if we change scope of type provide to REQUEST, it works:
```
@provide(scope=Scope.REQUEST)
def get_foo_class(self) -> type[AbstractFoo]:
return SolidFoo
```
Downgrading:
dishka==1.2.0 - works OK
dishka==1.3.0 - works OK | reagento/dishka | diff --git a/tests/unit/container/test_generic.py b/tests/unit/container/test_generic.py
index 79b307f..4b00e59 100644
--- a/tests/unit/container/test_generic.py
+++ b/tests/unit/container/test_generic.py
@@ -207,3 +207,18 @@ def test_passing_type_var_decorator():
provider.decorate(type_var_decorator)
container = make_container(provider)
assert container.get(int) == (int, 1, A(42, y=42))
+
+
+
+def func_with_type(param: type[T], param2: type[int]) -> A[T]:
+ return param, param2
+
+
+def test_provide_type_non_generic():
+ provider = Provider(scope=Scope.APP)
+ provider.provide(func_with_type, scope=Scope.REQUEST)
+ provider.provide(lambda: bool, provides=type[int])
+ container = make_container(provider)
+ with container() as c:
+ assert c.get(A[str]) == (str, bool)
+ assert c.get(A[int]) == (bool, bool)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"ruff",
"tox",
"tox-uv",
"mypy",
"pytest"
],
"pre_install": [],
"python": "3.10",
"reqs_path": [
"requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
-e git+https://github.com/reagento/dishka.git@8fd67f43bb1662e802415177108c43f4e578470f#egg=dishka
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
iniconfig==2.1.0
mypy==1.12.1
mypy-extensions==1.0.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pyproject-api==1.9.0
pytest==8.3.5
ruff==0.6.9
tomli==2.2.1
tox==4.21.2
tox-uv==1.15.0
typing_extensions==4.13.0
uv==0.6.11
virtualenv==20.29.3
| name: dishka
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- dishka==0.1
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- iniconfig==2.1.0
- mypy==1.12.1
- mypy-extensions==1.0.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pyproject-api==1.9.0
- pytest==8.3.5
- ruff==0.6.9
- tomli==2.2.1
- tox==4.21.2
- tox-uv==1.15.0
- typing-extensions==4.13.0
- uv==0.6.11
- virtualenv==20.29.3
prefix: /opt/conda/envs/dishka
| [
"tests/unit/container/test_generic.py::test_provide_type_non_generic"
] | [] | [
"tests/unit/container/test_generic.py::test_concrete_generic[A]",
"tests/unit/container/test_generic.py::test_concrete_generic[B]",
"tests/unit/container/test_generic.py::test_concrete_generic[ReplaceInit]",
"tests/unit/container/test_generic.py::test_concrete_child",
"tests/unit/container/test_generic.py::test_generic_class",
"tests/unit/container/test_generic.py::test_bare_generic_method",
"tests/unit/container/test_generic.py::test_generic_func",
"tests/unit/container/test_generic.py::test_generic_validation_ok",
"tests/unit/container/test_generic.py::test_generic_validation_typevar_ok",
"tests/unit/container/test_generic.py::test_generic_validation_fail",
"tests/unit/container/test_generic.py::test_passing_type_var",
"tests/unit/container/test_generic.py::test_func_with_generic_params",
"tests/unit/container/test_generic.py::test_passing_type_var_decorator"
] | [] | Apache License 2.0 | 20,163 | 811 | [
"src/dishka/registry.py",
"src/dishka/registry_builder.py"
] |
|
radiocosmology__alpenhorn-214 | 1d17a4686ccf5cee20846ea2f27fc46f7e1c8c5d | 2024-11-07 22:29:13 | 1d17a4686ccf5cee20846ea2f27fc46f7e1c8c5d | diff --git a/alpenhorn/db/storage.py b/alpenhorn/db/storage.py
index 2fc243a..0227831 100644
--- a/alpenhorn/db/storage.py
+++ b/alpenhorn/db/storage.py
@@ -240,8 +240,8 @@ class StorageNode(base_model):
# Check has_file
return copy.has_file != "N"
- def filecopy_present(self, file: ArchiveFile) -> bool:
- """Is a copy of ArchiveFile `file` present on this node?
+ def filecopy_state(self, file: ArchiveFile) -> str:
+ """What is the state of `file` on this node?
Parameters
----------
@@ -250,22 +250,25 @@ class StorageNode(base_model):
Returns
-------
- filecopy_present : bool
- True if there is an ArchiveFileCopy of `file`r
- with `has_file=='Y'` on this node. False otherwise.
+ filecopy_state : str
+ One of:
+ - 'Y' file copy exists
+ - 'X' file copy is corrupt
+ - 'M' file copy needs to be checked
+ - 'N' file copy does not exist
"""
from .archive import ArchiveFileCopy
try:
- ArchiveFileCopy.get(
+ copy = ArchiveFileCopy.get(
ArchiveFileCopy.file == file,
ArchiveFileCopy.node == self,
- ArchiveFileCopy.has_file == "Y",
)
+ return copy.has_file
except pw.DoesNotExist:
- return False
+ pass
- return True
+ return "N"
def get_total_gb(self) -> float:
"""Sum the size in GiB of all files on this node.
diff --git a/alpenhorn/io/_default_asyncs.py b/alpenhorn/io/_default_asyncs.py
index 16b926f..344471e 100644
--- a/alpenhorn/io/_default_asyncs.py
+++ b/alpenhorn/io/_default_asyncs.py
@@ -8,6 +8,7 @@ import errno
import shutil
import logging
import pathlib
+import peewee as pw
from . import ioutil
from ..db import ArchiveFileCopy, ArchiveFileCopyRequest, utcnow
@@ -335,21 +336,8 @@ def group_search_async(
# ready == False is the safe option here: copy will be readied
# during the subsequent check if needed.
- count = (
- ArchiveFileCopy.update(
- has_file="M",
- wants_file="Y",
- ready=False,
- last_update=utcnow(),
- )
- .where(
- ArchiveFileCopy.file == req.file,
- ArchiveFileCopy.node == node.db,
- )
- .execute()
- )
- if count == 0:
- # Create new copy
+ try:
+ # Try to create a new copy
ArchiveFileCopy.create(
file=req.file,
node=node.db,
@@ -358,6 +346,17 @@ def group_search_async(
ready=False,
size_b=node.io.filesize(req.file.path, actual=True),
)
+ except pw.IntegrityError:
+ # Copy already exists, just update the existing
+ ArchiveFileCopy.update(
+ has_file="M",
+ wants_file="Y",
+ ready=False,
+ last_update=utcnow(),
+ ).where(
+ ArchiveFileCopy.file == req.file,
+ ArchiveFileCopy.node == node.db,
+ ).execute()
return
# Otherwise, escallate to groupio.pull_force to actually perform the pull
diff --git a/alpenhorn/server/update.py b/alpenhorn/server/update.py
index 34380d5..27df4d0 100644
--- a/alpenhorn/server/update.py
+++ b/alpenhorn/server/update.py
@@ -469,14 +469,20 @@ class UpdateableNode(updateable_base):
ArchiveFileCopyRequest.cancelled == 0,
ArchiveFileCopyRequest.node_from == self.db,
):
- if self.db.filecopy_present(req.file):
+ state = self.db.filecopy_state(req.file)
+ if state == "Y":
self._io_happened = True
self.io.ready_pull(req)
else:
+ reasons = {
+ "N": "not present",
+ "M": "needs check",
+ "X": "corrupt",
+ }
log.info(
"Ignoring ready request for "
f"{req.file.acq.name}/{req.file.name} "
- f"on node {self.name}: not present."
+ f"on node {self.name}: {reasons[state]}."
)
self._updated = True
@@ -629,8 +635,10 @@ class UpdateableGroup(updateable_base):
)
return
- # If the source file doesn't exist, cancel the request.
- if not req.node_from.filecopy_present(req.file):
+ # If the source file doesn't exist, cancel the request. If the
+ # source is suspect, skip the request.
+ state = req.node_from.filecopy_state(req.file)
+ if state == "N" or state == "X":
log.warning(
f"Cancelling request for {req.file.acq.name}/{req.file.name}:"
f" not available on node {req.node_from.name}. "
@@ -640,6 +648,12 @@ class UpdateableGroup(updateable_base):
ArchiveFileCopyRequest.id == req.id
).execute()
return
+ elif state == "M":
+ log.info(
+ f"Skipping request for {req.file.acq.name}/{req.file.name}:"
+ f" source needs check on node {req.node_from.name}."
+ )
+ return
# If the source file is not ready, skip the request.
node_from = RemoteNode(req.node_from)
| Alpenhorn cancels AFCR for source with `has_file='M'`
Should probably just skip it until after the check. | radiocosmology/alpenhorn | diff --git a/tests/db/test_storage.py b/tests/db/test_storage.py
index 70b754f..5c75828 100644
--- a/tests/db/test_storage.py
+++ b/tests/db/test_storage.py
@@ -282,19 +282,27 @@ def test_namedcopytracked(simplegroup, storagenode, simplefile, archivefilecopy)
assert node.named_copy_tracked(acqname, filename) is False
-def test_copypresent(simplegroup, storagenode, simplefile, archivefilecopy):
- """Test StorageNode.filecopy_present()."""
+def test_node_copystate(simplegroup, storagenode, simplefile, archivefilecopy):
+ """Test StorageNode.filecopy_state()."""
node = storagenode(name="present", group=simplegroup)
archivefilecopy(file=simplefile, node=node, has_file="Y")
- assert node.filecopy_present(simplefile) is True
+ assert node.filecopy_state(simplefile) == "Y"
+
+ node = storagenode(name="suspect", group=simplegroup)
+ archivefilecopy(file=simplefile, node=node, has_file="M")
+ assert node.filecopy_state(simplefile) == "M"
node = storagenode(name="corrupt", group=simplegroup)
archivefilecopy(file=simplefile, node=node, has_file="X")
- assert node.filecopy_present(simplefile) is False
+ assert node.filecopy_state(simplefile) == "X"
+
+ node = storagenode(name="removed", group=simplegroup)
+ archivefilecopy(file=simplefile, node=node, has_file="N")
+ assert node.filecopy_state(simplefile) == "N"
node = storagenode(name="missing", group=simplegroup)
- assert node.filecopy_present(simplefile) is False
+ assert node.filecopy_state(simplefile) == "N"
def test_allfiles(simplenode, simpleacq, archivefile, archivefilecopy):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.10",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/radiocosmology/alpenhorn.git@1d17a4686ccf5cee20846ea2f27fc46f7e1c8c5d#egg=alpenhorn
bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chimedb @ git+https://github.com/chime-experiment/chimedb.git@d82f48eb0599393723e7ee5d756aff6c6830db32
click==8.1.8
concurrent-log-handler==0.9.25
cryptography==44.0.2
docker==7.1.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mysql-connector-python==8.0.29
packaging==24.2
paramiko==3.5.1
peewee==3.17.9
pluggy==1.5.0
portalocker==3.1.1
protobuf==6.30.2
pycparser==2.22
pyfakefs==5.8.0
PyNaCl==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
sshtunnel==0.4.0
tabulate==0.9.0
tomli==2.2.1
ujson==5.10.0
urllib3==2.3.0
watchdog==6.0.0
| name: alpenhorn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alpenhorn==2.0.0a1
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chimedb==24.8.0.post2+git.d82f48eb
- click==8.1.8
- concurrent-log-handler==0.9.25
- cryptography==44.0.2
- docker==7.1.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mysql-connector-python==8.0.29
- packaging==24.2
- paramiko==3.5.1
- peewee==3.17.9
- pluggy==1.5.0
- portalocker==3.1.1
- protobuf==6.30.2
- pycparser==2.22
- pyfakefs==5.8.0
- pynacl==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- sshtunnel==0.4.0
- tabulate==0.9.0
- tomli==2.2.1
- ujson==5.10.0
- urllib3==2.3.0
- watchdog==6.0.0
prefix: /opt/conda/envs/alpenhorn
| [
"tests/db/test_storage.py::test_node_copystate"
] | [] | [
"tests/db/test_storage.py::test_schema",
"tests/db/test_storage.py::test_group_model",
"tests/db/test_storage.py::test_storage_model",
"tests/db/test_storage.py::test_local",
"tests/db/test_storage.py::test_copy_state",
"tests/db/test_storage.py::test_copy_state_multi",
"tests/db/test_storage.py::test_archive_property",
"tests/db/test_storage.py::test_undermin",
"tests/db/test_storage.py::test_totalgb",
"tests/db/test_storage.py::test_overmax",
"tests/db/test_storage.py::test_namedcopytracked",
"tests/db/test_storage.py::test_allfiles",
"tests/db/test_storage.py::test_update_avail_gb",
"tests/db/test_storage.py::test_edge_model",
"tests/db/test_storage.py::test_edge_self_loop"
] | [] | MIT License | 20,182 | 1,379 | [
"alpenhorn/db/storage.py",
"alpenhorn/io/_default_asyncs.py",
"alpenhorn/server/update.py"
] |
|
modern-python__that-depends-118 | 3657c23eb199b49e2e535a109c82fd57c4f4d6ee | 2024-11-10 09:59:31 | 3657c23eb199b49e2e535a109c82fd57c4f4d6ee | diff --git a/that_depends/providers/object.py b/that_depends/providers/object.py
index a0095c7..2b218a3 100644
--- a/that_depends/providers/object.py
+++ b/that_depends/providers/object.py
@@ -15,7 +15,9 @@ class Object(AbstractProvider[T_co]):
self._obj: typing.Final = obj
async def async_resolve(self) -> T_co:
- return self._obj
+ return self.sync_resolve()
def sync_resolve(self) -> T_co:
+ if self._override is not None:
+ return typing.cast(T_co, self._override)
return self._obj
| Allow overriding Object provider
Currently, it's not possible to override the `Object` provider. For example:
```python
class DIContainer(BaseContainer):
object = providers.Object(42)
with DIContainer.override_providers({"object": 123}):
assert DIContainer.object.sync_resolve() == 123 # Raises AssertionError 42 == 123
```
However, it is possible to override similar providers, such as `Singleton` or `Factory`. I've encountered such a case at my work, so I suggest making it possible to override `Object` providers for consistent behavior. | modern-python/that-depends | diff --git a/tests/container.py b/tests/container.py
index 944c92d..69b1277 100644
--- a/tests/container.py
+++ b/tests/container.py
@@ -66,3 +66,4 @@ class DIContainer(BaseContainer):
async_resource=async_resource.cast,
)
singleton = providers.Singleton(SingletonFactory, dep1=True)
+ object = providers.Object(object())
diff --git a/tests/providers/test_providers_overriding.py b/tests/providers/test_providers_overriding.py
index a6dd0b5..1f19b10 100644
--- a/tests/providers/test_providers_overriding.py
+++ b/tests/providers/test_providers_overriding.py
@@ -11,6 +11,7 @@ async def test_batch_providers_overriding() -> None:
async_factory_mock = datetime.datetime.fromisoformat("2025-01-01")
simple_factory_mock = container.SimpleFactory(dep1="override", dep2=999)
singleton_mock = container.SingletonFactory(dep1=False)
+ object_mock = object()
providers_for_overriding = {
"async_resource": async_resource_mock,
@@ -18,6 +19,7 @@ async def test_batch_providers_overriding() -> None:
"simple_factory": simple_factory_mock,
"singleton": singleton_mock,
"async_factory": async_factory_mock,
+ "object": object_mock,
}
with container.DIContainer.override_providers(providers_for_overriding):
@@ -25,6 +27,7 @@ async def test_batch_providers_overriding() -> None:
dependent_factory = await container.DIContainer.dependent_factory()
singleton = await container.DIContainer.singleton()
async_factory = await container.DIContainer.async_factory()
+ obj = await container.DIContainer.object()
assert dependent_factory.simple_factory.dep1 == simple_factory_mock.dep1
assert dependent_factory.simple_factory.dep2 == simple_factory_mock.dep2
@@ -32,6 +35,7 @@ async def test_batch_providers_overriding() -> None:
assert dependent_factory.async_resource == async_resource_mock
assert singleton is singleton_mock
assert async_factory is async_factory_mock
+ assert obj is object_mock
assert (await container.DIContainer.async_resource()) != async_resource_mock
@@ -41,12 +45,14 @@ async def test_batch_providers_overriding_sync_resolve() -> None:
sync_resource_mock = datetime.datetime.fromisoformat("2024-01-01")
simple_factory_mock = container.SimpleFactory(dep1="override", dep2=999)
singleton_mock = container.SingletonFactory(dep1=False)
+ object_mock = object()
providers_for_overriding = {
"async_resource": async_resource_mock,
"sync_resource": sync_resource_mock,
"simple_factory": simple_factory_mock,
"singleton": singleton_mock,
+ "object": object_mock,
}
with container.DIContainer.override_providers(providers_for_overriding):
@@ -54,12 +60,14 @@ async def test_batch_providers_overriding_sync_resolve() -> None:
await container.DIContainer.async_resource.async_resolve()
dependent_factory = container.DIContainer.dependent_factory.sync_resolve()
singleton = container.DIContainer.singleton.sync_resolve()
+ obj = container.DIContainer.object.sync_resolve()
assert dependent_factory.simple_factory.dep1 == simple_factory_mock.dep1
assert dependent_factory.simple_factory.dep2 == simple_factory_mock.dep2
assert dependent_factory.sync_resource == sync_resource_mock
assert dependent_factory.async_resource == async_resource_mock
assert singleton is singleton_mock
+ assert obj is object_mock
assert container.DIContainer.sync_resource.sync_resolve() != sync_resource_mock
@@ -88,16 +96,19 @@ async def test_providers_overriding() -> None:
async_factory_mock = datetime.datetime.fromisoformat("2025-01-01")
simple_factory_mock = container.SimpleFactory(dep1="override", dep2=999)
singleton_mock = container.SingletonFactory(dep1=False)
+ object_mock = object()
container.DIContainer.async_resource.override(async_resource_mock)
container.DIContainer.sync_resource.override(sync_resource_mock)
container.DIContainer.simple_factory.override(simple_factory_mock)
container.DIContainer.singleton.override(singleton_mock)
container.DIContainer.async_factory.override(async_factory_mock)
+ container.DIContainer.object.override(object_mock)
await container.DIContainer.simple_factory()
dependent_factory = await container.DIContainer.dependent_factory()
singleton = await container.DIContainer.singleton()
async_factory = await container.DIContainer.async_factory()
+ obj = await container.DIContainer.object()
assert dependent_factory.simple_factory.dep1 == simple_factory_mock.dep1
assert dependent_factory.simple_factory.dep2 == simple_factory_mock.dep2
@@ -105,6 +116,7 @@ async def test_providers_overriding() -> None:
assert dependent_factory.async_resource == async_resource_mock
assert singleton is singleton_mock
assert async_factory is async_factory_mock
+ assert obj is object_mock
container.DIContainer.reset_override()
assert (await container.DIContainer.async_resource()) != async_resource_mock
@@ -115,21 +127,25 @@ async def test_providers_overriding_sync_resolve() -> None:
sync_resource_mock = datetime.datetime.fromisoformat("2024-01-01")
simple_factory_mock = container.SimpleFactory(dep1="override", dep2=999)
singleton_mock = container.SingletonFactory(dep1=False)
+ object_mock = object()
container.DIContainer.async_resource.override(async_resource_mock)
container.DIContainer.sync_resource.override(sync_resource_mock)
container.DIContainer.simple_factory.override(simple_factory_mock)
container.DIContainer.singleton.override(singleton_mock)
+ container.DIContainer.object.override(object_mock)
container.DIContainer.simple_factory.sync_resolve()
await container.DIContainer.async_resource.async_resolve()
dependent_factory = container.DIContainer.dependent_factory.sync_resolve()
singleton = container.DIContainer.singleton.sync_resolve()
+ obj = container.DIContainer.object.sync_resolve()
assert dependent_factory.simple_factory.dep1 == simple_factory_mock.dep1
assert dependent_factory.simple_factory.dep2 == simple_factory_mock.dep2
assert dependent_factory.sync_resource == sync_resource_mock
assert dependent_factory.async_resource == async_resource_mock
assert singleton is singleton_mock
+ assert obj is object_mock
container.DIContainer.reset_override()
assert container.DIContainer.sync_resource.sync_resolve() != sync_resource_mock
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.23 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
-e git+https://github.com/modern-python/that-depends.git@3657c23eb199b49e2e535a109c82fd57c4f4d6ee#egg=that_depends
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: that-depends
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py310h06a4308_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- pytest=8.3.4=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py310h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- that-depends==1.23.0
prefix: /opt/conda/envs/that-depends
| [
"tests/providers/test_providers_overriding.py::test_batch_providers_overriding",
"tests/providers/test_providers_overriding.py::test_batch_providers_overriding_sync_resolve",
"tests/providers/test_providers_overriding.py::test_providers_overriding",
"tests/providers/test_providers_overriding.py::test_providers_overriding_sync_resolve"
] | [] | [
"tests/providers/test_providers_overriding.py::test_providers_overriding_with_context_manager",
"tests/providers/test_providers_overriding.py::test_providers_overriding_fail_with_unknown_provider"
] | [] | MIT License | 20,203 | 158 | [
"that_depends/providers/object.py"
] |
|
projectmesa__mesa-2487 | a04e20b46d1d11a13163277cb8bcbac86d4851ff | 2024-11-11 13:04:09 | 134995f45c8ac3c1b4ce12ca05c21d4654962d02 | diff --git a/mesa/examples/advanced/sugarscape_g1mt/app.py b/mesa/examples/advanced/sugarscape_g1mt/app.py
index 8a5441cc..6564cb68 100644
--- a/mesa/examples/advanced/sugarscape_g1mt/app.py
+++ b/mesa/examples/advanced/sugarscape_g1mt/app.py
@@ -4,7 +4,7 @@ from matplotlib.figure import Figure
from mesa.examples.advanced.sugarscape_g1mt.agents import Trader
from mesa.examples.advanced.sugarscape_g1mt.model import SugarscapeG1mt
-from mesa.visualization import SolaraViz, make_plot_component
+from mesa.visualization import Slider, SolaraViz, make_plot_component
def SpaceDrawer(model):
@@ -49,13 +49,29 @@ def SpaceDrawer(model):
model_params = {
"width": 50,
"height": 50,
+ # Population parameters
+ "initial_population": Slider(
+ "Initial Population", value=200, min=50, max=500, step=10
+ ),
+ # Agent endowment parameters
+ "endowment_min": Slider("Min Initial Endowment", value=25, min=5, max=30, step=1),
+ "endowment_max": Slider("Max Initial Endowment", value=50, min=30, max=100, step=1),
+ # Metabolism parameters
+ "metabolism_min": Slider("Min Metabolism", value=1, min=1, max=3, step=1),
+ "metabolism_max": Slider("Max Metabolism", value=5, min=3, max=8, step=1),
+ # Vision parameters
+ "vision_min": Slider("Min Vision", value=1, min=1, max=3, step=1),
+ "vision_max": Slider("Max Vision", value=5, min=3, max=8, step=1),
+ # Trade parameter
+ "enable_trade": {"type": "Checkbox", "value": True, "label": "Enable Trading"},
}
-model1 = SugarscapeG1mt(50, 50)
+model1 = SugarscapeG1mt()
page = SolaraViz(
model1,
components=[SpaceDrawer, make_plot_component(["Trader", "Price"])],
+ model_params=model_params,
name="Sugarscape {G1, M, T}",
play_interval=150,
)
diff --git a/mesa/examples/advanced/wolf_sheep/app.py b/mesa/examples/advanced/wolf_sheep/app.py
index fa66d2bf..878c6797 100644
--- a/mesa/examples/advanced/wolf_sheep/app.py
+++ b/mesa/examples/advanced/wolf_sheep/app.py
@@ -83,7 +83,7 @@ lineplot_component = make_plot_component(
)
simulator = ABMSimulator()
-model = WolfSheep(simulator, grass=True)
+model = WolfSheep(simulator=simulator, grass=True)
page = SolaraViz(
model,
diff --git a/mesa/examples/basic/boid_flockers/agents.py b/mesa/examples/basic/boid_flockers/agents.py
index 48ce2b5a..2ff00cba 100644
--- a/mesa/examples/basic/boid_flockers/agents.py
+++ b/mesa/examples/basic/boid_flockers/agents.py
@@ -54,13 +54,14 @@ class Boid(Agent):
self.cohere_factor = cohere
self.separate_factor = separate
self.match_factor = match
+ self.neighbors = []
def step(self):
"""Get the Boid's neighbors, compute the new vector, and move accordingly."""
- neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)
+ self.neighbors = self.model.space.get_neighbors(self.pos, self.vision, False)
# If no neighbors, maintain current direction
- if not neighbors:
+ if not self.neighbors:
new_pos = self.pos + self.direction * self.speed
self.model.space.move_agent(self, new_pos)
return
@@ -71,7 +72,7 @@ class Boid(Agent):
separation_vector = np.zeros(2) # Separation vector
# Calculate the contribution of each neighbor to the three behaviors
- for neighbor in neighbors:
+ for neighbor in self.neighbors:
heading = self.model.space.get_heading(self.pos, neighbor.pos)
distance = self.model.space.get_distance(self.pos, neighbor.pos)
@@ -86,7 +87,7 @@ class Boid(Agent):
match_vector += neighbor.direction
# Weight each behavior by its factor and normalize by number of neighbors
- n = len(neighbors)
+ n = len(self.neighbors)
cohere = cohere * self.cohere_factor
separation_vector = separation_vector * self.separate_factor
match_vector = match_vector * self.match_factor
diff --git a/mesa/examples/basic/boid_flockers/app.py b/mesa/examples/basic/boid_flockers/app.py
index effff8c2..074c1ab3 100644
--- a/mesa/examples/basic/boid_flockers/app.py
+++ b/mesa/examples/basic/boid_flockers/app.py
@@ -3,10 +3,7 @@ from mesa.visualization import Slider, SolaraViz, make_space_component
def boid_draw(agent):
- if not agent.neighbors: # Only for the first Frame
- neighbors = len(agent.model.space.get_neighbors(agent.pos, agent.vision, False))
- else:
- neighbors = len(agent.neighbors)
+ neighbors = len(agent.neighbors)
if neighbors <= 1:
return {"color": "red", "size": 20}
diff --git a/mesa/examples/basic/schelling/app.py b/mesa/examples/basic/schelling/app.py
index 73492fa0..933b3c27 100644
--- a/mesa/examples/basic/schelling/app.py
+++ b/mesa/examples/basic/schelling/app.py
@@ -26,7 +26,7 @@ model_params = {
"height": 20,
}
-model1 = Schelling(20, 20, 0.8, 0.2, 3)
+model1 = Schelling()
HappyPlot = make_plot_component({"happy": "tab:green"})
diff --git a/mesa/visualization/solara_viz.py b/mesa/visualization/solara_viz.py
index 9819e1c9..10718e8a 100644
--- a/mesa/visualization/solara_viz.py
+++ b/mesa/visualization/solara_viz.py
@@ -43,10 +43,10 @@ if TYPE_CHECKING:
@solara.component
def SolaraViz(
model: Model | solara.Reactive[Model],
- *,
components: list[reacton.core.Component]
| list[Callable[[Model], reacton.core.Component]]
| Literal["default"] = "default",
+ *,
play_interval: int = 100,
simulator: Simulator | None = None,
model_params=None,
@@ -276,7 +276,7 @@ def SimulatorController(
running.value = True
simulator.reset()
model.value = model.value = model.value.__class__(
- simulator, **model_parameters.value
+ simulator=simulator, **model_parameters.value
)
def do_play_pause():
| examples: Sugerscape_g1mt is missing input Model Params (Sliders etc.)
The Sugerscape_g1mt app.py is missing any input Model Params (like Sliders etc.) to play with in the visualisation.
https://github.com/projectmesa/mesa/blob/a04e20b46d1d11a13163277cb8bcbac86d4851ff/mesa/examples/advanced/sugarscape_g1mt/app.py#L49-L52
I tried to add these:
```Python
model_params = {
# Grid parameters
"width": Slider("Grid Width", value=50, min=20, max=100, step=10),
"height": Slider("Grid Height", value=50, min=20, max=100, step=10),
# Population parameters
"initial_population": Slider("Initial Population", value=200, min=50, max=500, step=10),
# Agent endowment parameters
"endowment_min": Slider("Min Initial Endowment", value=25, min=5, max=30, step=1),
"endowment_max": Slider("Max Initial Endowment", value=50, min=30, max=100, step=1),
# Metabolism parameters
"metabolism_min": Slider("Min Metabolism", value=1, min=1, max=3, step=1),
"metabolism_max": Slider("Max Metabolism", value=5, min=3, max=8, step=1),
# Vision parameters
"vision_min": Slider("Min Vision", value=1, min=1, max=3, step=1),
"vision_max": Slider("Max Vision", value=5, min=3, max=8, step=1),
# Trade parameter
"enable_trade": {"type": "Checkbox", "value": True, "label": "Enable Trading"},
}
```
But modifying things lead to all kinds of error and unexpected behavior. For example, changing the width only did so partially (and apparently the height instead of the width):

I suspect that's due to the model first being initialized (with `model1 = SugarscapeG1mt()`), and then the slider value being modified.
CC @Corvince | projectmesa/mesa | diff --git a/tests/test_examples.py b/tests/test_examples.py
index 98d1d580..0e8a7edc 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -13,6 +13,10 @@ from mesa.examples import (
def test_boltzmann_model(): # noqa: D103
+ from mesa.examples.basic.boltzmann_wealth_model import app
+
+ app.page # noqa: B018
+
model = BoltzmannWealth(seed=42)
for _i in range(10):
@@ -20,24 +24,40 @@ def test_boltzmann_model(): # noqa: D103
def test_conways_game_model(): # noqa: D103
+ from mesa.examples.basic.conways_game_of_life import app
+
+ app.page # noqa: B018
+
model = ConwaysGameOfLife(seed=42)
for _i in range(10):
model.step()
def test_schelling_model(): # noqa: D103
+ from mesa.examples.basic.schelling import app
+
+ app.page # noqa: B018
+
model = Schelling(seed=42)
for _i in range(10):
model.step()
def test_virus_on_network(): # noqa: D103
+ from mesa.examples.basic.virus_on_network import app
+
+ app.page # noqa: B018
+
model = VirusOnNetwork(seed=42)
for _i in range(10):
model.step()
def test_boid_flockers(): # noqa: D103
+ from mesa.examples.basic.boid_flockers import app
+
+ app.page # noqa: B018
+
model = BoidFlockers(seed=42)
for _i in range(10):
@@ -45,6 +65,10 @@ def test_boid_flockers(): # noqa: D103
def test_epstein(): # noqa: D103
+ from mesa.examples.advanced.epstein_civil_violence import app
+
+ app.page # noqa: B018
+
model = EpsteinCivilViolence(seed=42)
for _i in range(10):
@@ -52,6 +76,10 @@ def test_epstein(): # noqa: D103
def test_pd_grid(): # noqa: D103
+ from mesa.examples.advanced.pd_grid import app
+
+ app.page # noqa: B018
+
model = PdGrid(seed=42)
for _i in range(10):
@@ -59,6 +87,10 @@ def test_pd_grid(): # noqa: D103
def test_sugarscape_g1mt(): # noqa: D103
+ from mesa.examples.advanced.sugarscape_g1mt import app
+
+ app.page # noqa: B018
+
model = SugarscapeG1mt(seed=42)
for _i in range(10):
@@ -66,8 +98,11 @@ def test_sugarscape_g1mt(): # noqa: D103
def test_wolf_sheep(): # noqa: D103
+ from mesa.examples.advanced.wolf_sheep import app
from mesa.experimental.devs import ABMSimulator
+ app.page # noqa: B018
+
simulator = ABMSimulator()
WolfSheep(seed=42, simulator=simulator)
simulator.run_for(10)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 6
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": null,
"python": "3.11",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | accessible-pygments==0.0.5
alabaster==1.0.0
anyio==4.9.0
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
comm==0.2.2
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
debugpy==1.8.13
decorator==5.2.1
docutils==0.21.2
executing==2.2.0
fastjsonschema==2.21.1
filelock==3.18.0
fonttools==4.56.0
greenlet==3.1.1
h11==0.14.0
humanize==4.12.2
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==9.0.2
ipython_pygments_lexers==1.1.1
ipyvue==1.11.2
ipyvuetify==1.11.1
ipywidgets==8.1.5
jedi==0.19.2
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-cache==1.0.1
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyterlab_widgets==3.0.13
kiwisolver==1.4.8
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
matplotlib-inline==0.1.7
mdit-py-plugins==0.4.2
mdurl==0.1.2
-e git+https://github.com/projectmesa/mesa.git@a04e20b46d1d11a13163277cb8bcbac86d4851ff#egg=Mesa
myst-nb==1.2.0
myst-parser==4.0.1
nbclient==0.10.2
nbformat==5.10.4
nest-asyncio==1.6.0
networkx==3.4.2
numpy==2.2.4
packaging==24.2
pandas==2.2.3
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pydata-sphinx-theme==0.16.1
Pygments==2.19.1
pymdown-extensions==10.14.3
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
reacton==1.9.1
referencing==0.36.2
requests==2.32.3
rich==14.0.0
rich-click==1.8.8
roman-numerals-py==3.1.0
rpds-py==0.24.0
ruff==0.11.2
scipy==1.15.2
seaborn==0.13.2
six==1.17.0
sniffio==1.3.1
snowballstemmer==2.2.0
solara==1.44.1
solara-server==1.44.1
solara-ui==1.44.1
soupsieve==2.6
Sphinx==8.2.3
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
stack-data==0.6.3
starlette==0.46.1
tabulate==0.9.0
tornado==6.4.2
tqdm==4.67.1
traitlets==5.14.3
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
uvicorn==0.34.0
watchdog==6.0.0
watchfiles==1.0.4
wcwidth==0.2.13
websockets==15.0.1
widgetsnbextension==4.0.13
zipp==3.21.0
| name: mesa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- accessible-pygments==0.0.5
- alabaster==1.0.0
- anyio==4.9.0
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- comm==0.2.2
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- debugpy==1.8.13
- decorator==5.2.1
- docutils==0.21.2
- executing==2.2.0
- fastjsonschema==2.21.1
- filelock==3.18.0
- fonttools==4.56.0
- greenlet==3.1.1
- h11==0.14.0
- humanize==4.12.2
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==9.0.2
- ipython-pygments-lexers==1.1.1
- ipyvue==1.11.2
- ipyvuetify==1.11.1
- ipywidgets==8.1.5
- jedi==0.19.2
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-cache==1.0.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyterlab-widgets==3.0.13
- kiwisolver==1.4.8
- markdown==3.7
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.10.1
- matplotlib-inline==0.1.7
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- mesa==3.1.0.dev0
- myst-nb==1.2.0
- myst-parser==4.0.1
- nbclient==0.10.2
- nbformat==5.10.4
- nest-asyncio==1.6.0
- networkx==3.4.2
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pydata-sphinx-theme==0.16.1
- pygments==2.19.1
- pymdown-extensions==10.14.3
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- reacton==1.9.1
- referencing==0.36.2
- requests==2.32.3
- rich==14.0.0
- rich-click==1.8.8
- roman-numerals-py==3.1.0
- rpds-py==0.24.0
- ruff==0.11.2
- scipy==1.15.2
- seaborn==0.13.2
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- solara==1.44.1
- solara-server==1.44.1
- solara-ui==1.44.1
- soupsieve==2.6
- sphinx==8.2.3
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- stack-data==0.6.3
- starlette==0.46.1
- tabulate==0.9.0
- tornado==6.4.2
- tqdm==4.67.1
- traitlets==5.14.3
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- uvicorn==0.34.0
- watchdog==6.0.0
- watchfiles==1.0.4
- wcwidth==0.2.13
- websockets==15.0.1
- widgetsnbextension==4.0.13
- zipp==3.21.0
prefix: /opt/conda/envs/mesa
| [
"tests/test_examples.py::test_wolf_sheep"
] | [] | [
"tests/test_examples.py::test_boltzmann_model",
"tests/test_examples.py::test_conways_game_model",
"tests/test_examples.py::test_schelling_model",
"tests/test_examples.py::test_virus_on_network",
"tests/test_examples.py::test_boid_flockers",
"tests/test_examples.py::test_epstein",
"tests/test_examples.py::test_pd_grid",
"tests/test_examples.py::test_sugarscape_g1mt"
] | [] | Apache License 2.0 | 20,207 | 1,755 | [
"mesa/examples/advanced/sugarscape_g1mt/app.py",
"mesa/examples/advanced/wolf_sheep/app.py",
"mesa/examples/basic/boid_flockers/agents.py",
"mesa/examples/basic/boid_flockers/app.py",
"mesa/examples/basic/schelling/app.py",
"mesa/visualization/solara_viz.py"
] |
|
tobymao__sqlglot-4370 | a665030323b200f3bed241bb928993b9807c4100 | 2024-11-11 13:28:59 | a71cee4b4eafad9988b945c69dc75583ae105ec7 | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 1466ac27..f5f7521a 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -778,7 +778,6 @@ class BigQuery(Dialect):
exp.Array: inline_array_unless_query,
exp.ArrayContains: _array_contains_sql,
exp.ArrayFilter: filter_array_using_unnest,
- exp.ArraySize: rename_func("ARRAY_LENGTH"),
exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
exp.CollateProperty: lambda self, e: (
f"DEFAULT COLLATE {self.sql(e, 'this')}"
diff --git a/sqlglot/dialects/clickhouse.py b/sqlglot/dialects/clickhouse.py
index 7c6ec11f..08e8781e 100644
--- a/sqlglot/dialects/clickhouse.py
+++ b/sqlglot/dialects/clickhouse.py
@@ -853,6 +853,7 @@ class ClickHouse(Dialect):
SET_OP_MODIFIERS = False
SUPPORTS_TABLE_ALIAS_COLUMNS = False
VALUES_AS_TABLE = False
+ ARRAY_SIZE_NAME = "LENGTH"
STRING_TYPE_MAPPING = {
exp.DataType.Type.CHAR: "String",
@@ -928,7 +929,6 @@ class ClickHouse(Dialect):
exp.AnyValue: rename_func("any"),
exp.ApproxDistinct: rename_func("uniq"),
exp.ArrayFilter: lambda self, e: self.func("arrayFilter", e.expression, e.this),
- exp.ArraySize: rename_func("LENGTH"),
exp.ArraySum: rename_func("arraySum"),
exp.ArgMax: arg_max_or_min_no_count("argMax"),
exp.ArgMin: arg_max_or_min_no_count("argMin"),
diff --git a/sqlglot/dialects/drill.py b/sqlglot/dialects/drill.py
index 88748649..5562123d 100644
--- a/sqlglot/dialects/drill.py
+++ b/sqlglot/dialects/drill.py
@@ -79,6 +79,7 @@ class Drill(Dialect):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
+ "REPEATED_COUNT": exp.ArraySize.from_arg_list,
"TO_TIMESTAMP": exp.TimeStrToTime.from_arg_list,
"TO_CHAR": build_formatted_time(exp.TimeToStr, "drill"),
"LEVENSHTEIN_DISTANCE": exp.Levenshtein.from_arg_list,
@@ -93,6 +94,7 @@ class Drill(Dialect):
NVL2_SUPPORTED = False
LAST_DAY_SUPPORTS_DATE_PART = False
SUPPORTS_CREATE_TABLE_LIKE = False
+ ARRAY_SIZE_NAME = "REPEATED_COUNT"
TYPE_MAPPING = {
**generator.Generator.TYPE_MAPPING,
@@ -117,7 +119,6 @@ class Drill(Dialect):
**generator.Generator.TRANSFORMS,
exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
exp.ArrayContains: rename_func("REPEATED_CONTAINS"),
- exp.ArraySize: rename_func("REPEATED_COUNT"),
exp.Create: preprocess([move_schema_columns_to_partitioned_by]),
exp.DateAdd: date_add_sql("ADD"),
exp.DateStrToDate: datestrtodate_sql,
diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index a183a883..de569f33 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -507,13 +507,13 @@ class DuckDB(Dialect):
STAR_EXCEPT = "EXCLUDE"
PAD_FILL_PATTERN_IS_REQUIRED = True
ARRAY_CONCAT_IS_VAR_LEN = False
+ ARRAY_SIZE_DIM_REQUIRED = False
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
exp.ApproxDistinct: approx_count_distinct_sql,
exp.Array: inline_array_unless_query,
exp.ArrayFilter: rename_func("LIST_FILTER"),
- exp.ArraySize: rename_func("ARRAY_LENGTH"),
exp.ArgMax: arg_max_or_min_no_count("ARG_MAX"),
exp.ArgMin: arg_max_or_min_no_count("ARG_MIN"),
exp.ArraySort: _array_sort_sql,
diff --git a/sqlglot/dialects/hive.py b/sqlglot/dialects/hive.py
index 0d21e8d8..86b64ef5 100644
--- a/sqlglot/dialects/hive.py
+++ b/sqlglot/dialects/hive.py
@@ -463,6 +463,7 @@ class Hive(Dialect):
PARSE_JSON_NAME = None
PAD_FILL_PATTERN_IS_REQUIRED = True
SUPPORTS_MEDIAN = False
+ ARRAY_SIZE_NAME = "SIZE"
EXPRESSIONS_WITHOUT_NESTED_CTES = {
exp.Insert,
@@ -500,7 +501,6 @@ class Hive(Dialect):
exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
exp.ArrayConcat: rename_func("CONCAT"),
exp.ArrayToString: lambda self, e: self.func("CONCAT_WS", e.expression, e.this),
- exp.ArraySize: rename_func("SIZE"),
exp.ArraySort: _array_sort_sql,
exp.With: no_recursive_cte_sql,
exp.DateAdd: _add_date_sql,
diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index 30e96fe9..ce2daf23 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -496,6 +496,7 @@ class Postgres(Dialect):
COPY_HAS_INTO_KEYWORD = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_MEDIAN = False
+ ARRAY_SIZE_DIM_REQUIRED = True
SUPPORTED_JSON_PATH_PARTS = {
exp.JSONPathKey,
@@ -519,7 +520,6 @@ class Postgres(Dialect):
exp.AnyValue: any_value_to_max_sql,
exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
exp.ArrayFilter: filter_array_using_unnest,
- exp.ArraySize: lambda self, e: self.func("ARRAY_LENGTH", e.this, e.expression or "1"),
exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
exp.CurrentDate: no_paren_current_date_sql,
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 5c4975d5..221c9142 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -321,6 +321,7 @@ class Presto(Dialect):
PAD_FILL_PATTERN_IS_REQUIRED = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = False
+ ARRAY_SIZE_NAME = "CARDINALITY"
PROPERTIES_LOCATION = {
**generator.Generator.PROPERTIES_LOCATION,
@@ -354,7 +355,6 @@ class Presto(Dialect):
exp.ArrayAny: rename_func("ANY_MATCH"),
exp.ArrayConcat: rename_func("CONCAT"),
exp.ArrayContains: rename_func("CONTAINS"),
- exp.ArraySize: rename_func("CARDINALITY"),
exp.ArrayToString: rename_func("ARRAY_JOIN"),
exp.ArrayUniqueAgg: rename_func("SET_AGG"),
exp.AtTimeZone: rename_func("AT_TIMEZONE"),
diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 57d26061..050bf9cc 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -823,6 +823,7 @@ class Snowflake(Dialect):
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
+ ARRAY_SIZE_NAME = "ARRAY_SIZE"
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
diff --git a/sqlglot/dialects/teradata.py b/sqlglot/dialects/teradata.py
index f015c122..c04915df 100644
--- a/sqlglot/dialects/teradata.py
+++ b/sqlglot/dialects/teradata.py
@@ -166,6 +166,7 @@ class Teradata(Dialect):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
+ "CARDINALITY": exp.ArraySize.from_arg_list,
"RANDOM": lambda args: exp.Rand(lower=seq_get(args, 0), upper=seq_get(args, 1)),
}
@@ -227,6 +228,7 @@ class Teradata(Dialect):
LAST_DAY_SUPPORTS_DATE_PART = False
CAN_IMPLEMENT_ARRAY_ANY = True
TZ_TO_WITH_TIME_ZONE = True
+ ARRAY_SIZE_NAME = "CARDINALITY"
TYPE_MAPPING = {
**generator.Generator.TYPE_MAPPING,
@@ -246,7 +248,6 @@ class Teradata(Dialect):
**generator.Generator.TRANSFORMS,
exp.ArgMax: rename_func("MAX_BY"),
exp.ArgMin: rename_func("MIN_BY"),
- exp.ArraySize: rename_func("CARDINALITY"),
exp.Max: max_or_greatest,
exp.Min: min_or_least,
exp.Pow: lambda self, e: self.binary(e, "**"),
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index f04cece1..b0c2a7f5 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -767,6 +767,7 @@ class Expression(metaclass=_Expression):
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
+ wrap: bool = True,
**opts,
) -> Condition:
"""
@@ -781,18 +782,22 @@ class Expression(metaclass=_Expression):
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
+ wrap: whether to wrap the operands in `Paren`s. This is true by default to avoid
+ precedence issues, but can be turned off when the produced AST is too deep and
+ causes recursion-related issues.
opts: other options to use to parse the input expressions.
Returns:
The new And condition.
"""
- return and_(self, *expressions, dialect=dialect, copy=copy, **opts)
+ return and_(self, *expressions, dialect=dialect, copy=copy, wrap=wrap, **opts)
def or_(
self,
*expressions: t.Optional[ExpOrStr],
dialect: DialectType = None,
copy: bool = True,
+ wrap: bool = True,
**opts,
) -> Condition:
"""
@@ -807,12 +812,15 @@ class Expression(metaclass=_Expression):
If an `Expression` instance is passed, it will be used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy the involved expressions (only applies to Expressions).
+ wrap: whether to wrap the operands in `Paren`s. This is true by default to avoid
+ precedence issues, but can be turned off when the produced AST is too deep and
+ causes recursion-related issues.
opts: other options to use to parse the input expressions.
Returns:
The new Or condition.
"""
- return or_(self, *expressions, dialect=dialect, copy=copy, **opts)
+ return or_(self, *expressions, dialect=dialect, copy=copy, wrap=wrap, **opts)
def not_(self, copy: bool = True):
"""
@@ -6921,6 +6929,7 @@ def _combine(
operator: t.Type[Connector],
dialect: DialectType = None,
copy: bool = True,
+ wrap: bool = True,
**opts,
) -> Expression:
conditions = [
@@ -6930,10 +6939,10 @@ def _combine(
]
this, *rest = conditions
- if rest:
+ if rest and wrap:
this = _wrap(this, Connector)
for expression in rest:
- this = operator(this=this, expression=_wrap(expression, Connector))
+ this = operator(this=this, expression=_wrap(expression, Connector) if wrap else expression)
return this
@@ -7316,7 +7325,11 @@ def condition(
def and_(
- *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
+ *expressions: t.Optional[ExpOrStr],
+ dialect: DialectType = None,
+ copy: bool = True,
+ wrap: bool = True,
+ **opts,
) -> Condition:
"""
Combine multiple conditions with an AND logical operator.
@@ -7330,16 +7343,23 @@ def and_(
If an Expression instance is passed, this is used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy `expressions` (only applies to Expressions).
+ wrap: whether to wrap the operands in `Paren`s. This is true by default to avoid
+ precedence issues, but can be turned off when the produced AST is too deep and
+ causes recursion-related issues.
**opts: other options to use to parse the input expressions.
Returns:
The new condition
"""
- return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, **opts))
+ return t.cast(Condition, _combine(expressions, And, dialect, copy=copy, wrap=wrap, **opts))
def or_(
- *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
+ *expressions: t.Optional[ExpOrStr],
+ dialect: DialectType = None,
+ copy: bool = True,
+ wrap: bool = True,
+ **opts,
) -> Condition:
"""
Combine multiple conditions with an OR logical operator.
@@ -7353,16 +7373,23 @@ def or_(
If an Expression instance is passed, this is used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy `expressions` (only applies to Expressions).
+ wrap: whether to wrap the operands in `Paren`s. This is true by default to avoid
+ precedence issues, but can be turned off when the produced AST is too deep and
+ causes recursion-related issues.
**opts: other options to use to parse the input expressions.
Returns:
The new condition
"""
- return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, **opts))
+ return t.cast(Condition, _combine(expressions, Or, dialect, copy=copy, wrap=wrap, **opts))
def xor(
- *expressions: t.Optional[ExpOrStr], dialect: DialectType = None, copy: bool = True, **opts
+ *expressions: t.Optional[ExpOrStr],
+ dialect: DialectType = None,
+ copy: bool = True,
+ wrap: bool = True,
+ **opts,
) -> Condition:
"""
Combine multiple conditions with an XOR logical operator.
@@ -7376,12 +7403,15 @@ def xor(
If an Expression instance is passed, this is used as-is.
dialect: the dialect used to parse the input expression.
copy: whether to copy `expressions` (only applies to Expressions).
+ wrap: whether to wrap the operands in `Paren`s. This is true by default to avoid
+ precedence issues, but can be turned off when the produced AST is too deep and
+ causes recursion-related issues.
**opts: other options to use to parse the input expressions.
Returns:
The new condition
"""
- return t.cast(Condition, _combine(expressions, Xor, dialect, copy=copy, **opts))
+ return t.cast(Condition, _combine(expressions, Xor, dialect, copy=copy, wrap=wrap, **opts))
def not_(expression: ExpOrStr, dialect: DialectType = None, copy: bool = True, **opts) -> Not:
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index e07e4916..a55712c0 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -442,6 +442,15 @@ class Generator(metaclass=_Generator):
# The name to generate for the JSONPath expression. If `None`, only `this` will be generated
PARSE_JSON_NAME: t.Optional[str] = "PARSE_JSON"
+ # The function name of the exp.ArraySize expression
+ ARRAY_SIZE_NAME: str = "ARRAY_LENGTH"
+
+ # Whether exp.ArraySize should generate the dimension arg too (valid for Postgres & DuckDB)
+ # None -> Doesn't support it at all
+ # False (DuckDB) -> Has backwards-compatible support, but preferably generated without
+ # True (Postgres) -> Explicitly requires it
+ ARRAY_SIZE_DIM_REQUIRED: t.Optional[bool] = None
+
TYPE_MAPPING = {
exp.DataType.Type.NCHAR: "CHAR",
exp.DataType.Type.NVARCHAR: "VARCHAR",
@@ -4487,3 +4496,18 @@ class Generator(metaclass=_Generator):
return self.sql(
exp.TimestampDiff(this=expression.this, expression=start_ts, unit=exp.var("SECONDS"))
)
+
+ def arraysize_sql(self, expression: exp.ArraySize) -> str:
+ dim = expression.expression
+
+ # For dialects that don't support the dimension arg, we can safely transpile it's default value (1st dimension)
+ if dim and self.ARRAY_SIZE_DIM_REQUIRED is None:
+ if not (dim.is_int and dim.name == "1"):
+ self.unsupported("Cannot transpile dimension argument for ARRAY_LENGTH")
+ dim = None
+
+ # If dimension is required but not specified, default initialize it
+ if self.ARRAY_SIZE_DIM_REQUIRED and not dim:
+ dim = exp.Literal.number(1)
+
+ return self.func(self.ARRAY_SIZE_NAME, expression.this, dim)
| Translation Bug With ARRAY_LENGTH function from PostgreSQL to Spark
**Fully reproducible code snippet**
```python
import sqlglot
query = "SELECT array_length(arr, 1) from table"
sqlglot.parse_one(query, dialect='postgres').sql(dialect='spark')
```
the output is: 'SELECT SIZE(arr, 1) FROM table'
the expected output is: 'SELECT SIZE(arr) FROM table'
**Official Documentation**
Please include links to official SQL documentation related to your issue.
1. spark docs: https://spark.apache.org/docs/3.5.1/api/sql/#size
2. postgres docs: https://www.postgresql.org/docs/8.4/functions-array.html | tobymao/sqlglot | diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 9608b33f..ffe08c67 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -51,7 +51,6 @@ class TestPostgres(Validator):
self.validate_identity("x$")
self.validate_identity("SELECT ARRAY[1, 2, 3]")
self.validate_identity("SELECT ARRAY(SELECT 1)")
- self.validate_identity("SELECT ARRAY_LENGTH(ARRAY[1, 2, 3], 1)")
self.validate_identity("STRING_AGG(x, y)")
self.validate_identity("STRING_AGG(x, ',' ORDER BY y)")
self.validate_identity("STRING_AGG(x, ',' ORDER BY y DESC)")
@@ -1242,3 +1241,49 @@ CROSS JOIN JSON_ARRAY_ELEMENTS(CAST(JSON_EXTRACT_PATH(tbox, 'boxes') AS JSON)) A
self.validate_identity(
"""SELECT * FROM table1, ROWS FROM (FUNC1(col1) AS alias1("col1" TEXT)) WITH ORDINALITY AS alias3("col3" INT, "col4" TEXT)"""
)
+
+ def test_array_length(self):
+ self.validate_identity("SELECT ARRAY_LENGTH(ARRAY[1, 2, 3], 1)")
+
+ self.validate_all(
+ "ARRAY_LENGTH(arr, 1)",
+ read={
+ "bigquery": "ARRAY_LENGTH(arr)",
+ "duckdb": "ARRAY_LENGTH(arr)",
+ "presto": "CARDINALITY(arr)",
+ "drill": "REPEATED_COUNT(arr)",
+ "teradata": "CARDINALITY(arr)",
+ "hive": "SIZE(arr)",
+ "spark2": "SIZE(arr)",
+ "spark": "SIZE(arr)",
+ "databricks": "SIZE(arr)",
+ },
+ write={
+ "duckdb": "ARRAY_LENGTH(arr, 1)",
+ "presto": "CARDINALITY(arr)",
+ "teradata": "CARDINALITY(arr)",
+ "bigquery": "ARRAY_LENGTH(arr)",
+ "drill": "REPEATED_COUNT(arr)",
+ "clickhouse": "LENGTH(arr)",
+ "hive": "SIZE(arr)",
+ "spark2": "SIZE(arr)",
+ "spark": "SIZE(arr)",
+ "databricks": "SIZE(arr)",
+ },
+ )
+
+ self.validate_all(
+ "ARRAY_LENGTH(arr, foo)",
+ write={
+ "duckdb": "ARRAY_LENGTH(arr, foo)",
+ "hive": UnsupportedError,
+ "spark2": UnsupportedError,
+ "spark": UnsupportedError,
+ "databricks": UnsupportedError,
+ "presto": UnsupportedError,
+ "teradata": UnsupportedError,
+ "bigquery": UnsupportedError,
+ "drill": UnsupportedError,
+ "clickhouse": UnsupportedError,
+ },
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 11
} | 25.29 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@a665030323b200f3bed241bb928993b9807c4100#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_postgres.py::TestPostgres::test_array_length"
] | [] | [
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_ddl",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_rows_from",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] | [] | MIT License | 20,208 | 4,425 | [
"sqlglot/dialects/bigquery.py",
"sqlglot/dialects/clickhouse.py",
"sqlglot/dialects/drill.py",
"sqlglot/dialects/duckdb.py",
"sqlglot/dialects/hive.py",
"sqlglot/dialects/postgres.py",
"sqlglot/dialects/presto.py",
"sqlglot/dialects/snowflake.py",
"sqlglot/dialects/teradata.py",
"sqlglot/expressions.py",
"sqlglot/generator.py"
] |
|
tobymao__sqlglot-4373 | a71cee4b4eafad9988b945c69dc75583ae105ec7 | 2024-11-11 17:01:27 | a71cee4b4eafad9988b945c69dc75583ae105ec7 | diff --git a/sqlglot/dialects/databricks.py b/sqlglot/dialects/databricks.py
index d1127f86..ed5e6423 100644
--- a/sqlglot/dialects/databricks.py
+++ b/sqlglot/dialects/databricks.py
@@ -7,7 +7,6 @@ from sqlglot.dialects.dialect import (
date_delta_sql,
build_date_delta,
timestamptrunc_sql,
- timestampdiff_sql,
)
from sqlglot.dialects.spark import Spark
from sqlglot.tokens import TokenType
@@ -46,7 +45,6 @@ class Databricks(Spark):
"DATE_ADD": build_date_delta(exp.DateAdd),
"DATEDIFF": build_date_delta(exp.DateDiff),
"DATE_DIFF": build_date_delta(exp.DateDiff),
- "TIMESTAMPDIFF": build_date_delta(exp.TimestampDiff),
"GET_JSON_OBJECT": _build_json_extract,
}
@@ -75,8 +73,6 @@ class Databricks(Spark):
exp.Mul(this=e.expression, expression=exp.Literal.number(-1)),
e.this,
),
- exp.DatetimeDiff: timestampdiff_sql,
- exp.TimestampDiff: timestampdiff_sql,
exp.DatetimeTrunc: timestamptrunc_sql(),
exp.Select: transforms.preprocess(
[
diff --git a/sqlglot/dialects/spark.py b/sqlglot/dialects/spark.py
index a58b708d..08d99e4e 100644
--- a/sqlglot/dialects/spark.py
+++ b/sqlglot/dialects/spark.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import typing as t
from sqlglot import exp
-from sqlglot.dialects.dialect import rename_func, unit_to_var
+from sqlglot.dialects.dialect import rename_func, unit_to_var, timestampdiff_sql, build_date_delta
from sqlglot.dialects.hive import _build_with_ignore_nulls
from sqlglot.dialects.spark2 import Spark2, temporary_storage_provider, _build_as_cast
from sqlglot.helper import ensure_list, seq_get
@@ -108,6 +108,7 @@ class Spark(Spark2):
"DATE_ADD": _build_dateadd,
"DATEADD": _build_dateadd,
"TIMESTAMPADD": _build_dateadd,
+ "TIMESTAMPDIFF": build_date_delta(exp.TimestampDiff),
"DATEDIFF": _build_datediff,
"DATE_DIFF": _build_datediff,
"TIMESTAMP_LTZ": _build_as_cast("TIMESTAMP_LTZ"),
@@ -167,6 +168,8 @@ class Spark(Spark2):
exp.StartsWith: rename_func("STARTSWITH"),
exp.TsOrDsAdd: _dateadd_sql,
exp.TimestampAdd: _dateadd_sql,
+ exp.DatetimeDiff: timestampdiff_sql,
+ exp.TimestampDiff: timestampdiff_sql,
exp.TryCast: lambda self, e: (
self.trycast_sql(e) if e.args.get("safe") else self.cast_sql(e)
),
| TIMESTAMPDIFF is not working property for Spark SQL
TIMESTAMPDIFF is generating incorrect output for output dialects `spark`
Input dialect: `databricks`
Output dialect: `spark`
<img width="526" alt="image" src="https://github.com/user-attachments/assets/4621a61c-bf2d-4ff6-aa81-681eaa8c377f">
Correct Output is `SELECT TIMESTAMPDIFF(DAY, a, b)`
| tobymao/sqlglot | diff --git a/tests/dialects/test_spark.py b/tests/dialects/test_spark.py
index 486bf792..1aa5c213 100644
--- a/tests/dialects/test_spark.py
+++ b/tests/dialects/test_spark.py
@@ -754,6 +754,17 @@ TBLPROPERTIES (
},
)
+ self.validate_all(
+ "SELECT TIMESTAMPDIFF(MONTH, foo, bar)",
+ read={
+ "databricks": "SELECT TIMESTAMPDIFF(MONTH, foo, bar)",
+ },
+ write={
+ "spark": "SELECT TIMESTAMPDIFF(MONTH, foo, bar)",
+ "databricks": "SELECT TIMESTAMPDIFF(MONTH, foo, bar)",
+ },
+ )
+
def test_bool_or(self):
self.validate_all(
"SELECT a, LOGICAL_OR(b) FROM table GROUP BY a",
diff --git a/tests/test_parser.py b/tests/test_parser.py
index 3e5e27e9..ba1240c4 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -712,20 +712,20 @@ class TestParser(unittest.TestCase):
self.assertEqual(expected_columns, [col.sql(dialect=dialect) for col in columns])
def test_parse_nested(self):
- now = time.time()
- query = parse_one("SELECT * FROM a " + ("LEFT JOIN b ON a.id = b.id " * 38))
- self.assertIsNotNone(query)
- self.assertLessEqual(time.time() - now, 0.1)
-
- now = time.time()
- query = parse_one("SELECT * FROM a " + ("LEFT JOIN UNNEST(ARRAY[]) " * 15))
- self.assertIsNotNone(query)
- self.assertLessEqual(time.time() - now, 0.1)
-
- now = time.time()
- query = parse_one("SELECT * FROM a " + ("OUTER APPLY (SELECT * FROM b) " * 30))
- self.assertIsNotNone(query)
- self.assertLessEqual(time.time() - now, 0.1)
+ def warn_over_threshold(query: str, max_threshold: float = 0.2):
+ now = time.time()
+ ast = parse_one(query)
+ end = time.time() - now
+
+ self.assertIsNotNone(ast)
+ if end >= max_threshold:
+ parser_logger.warning(
+ f"Query {query[:100]}... surpassed the time threshold of {max_threshold} seconds"
+ )
+
+ warn_over_threshold("SELECT * FROM a " + ("LEFT JOIN b ON a.id = b.id " * 38))
+ warn_over_threshold("SELECT * FROM a " + ("LEFT JOIN UNNEST(ARRAY[]) " * 15))
+ warn_over_threshold("SELECT * FROM a " + ("OUTER APPLY (SELECT * FROM b) " * 30))
def test_parse_properties(self):
self.assertEqual(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 25.29 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@a71cee4b4eafad9988b945c69dc75583ae105ec7#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_spark.py::TestSpark::test_spark"
] | [] | [
"tests/dialects/test_spark.py::TestSpark::test_bool_or",
"tests/dialects/test_spark.py::TestSpark::test_current_user",
"tests/dialects/test_spark.py::TestSpark::test_ddl",
"tests/dialects/test_spark.py::TestSpark::test_explode_to_unnest",
"tests/dialects/test_spark.py::TestSpark::test_hint",
"tests/dialects/test_spark.py::TestSpark::test_insert_cte",
"tests/dialects/test_spark.py::TestSpark::test_minus",
"tests/dialects/test_spark.py::TestSpark::test_schema_binding_options",
"tests/dialects/test_spark.py::TestSpark::test_string",
"tests/dialects/test_spark.py::TestSpark::test_strip_modifiers",
"tests/dialects/test_spark.py::TestSpark::test_to_date",
"tests/dialects/test_spark.py::TestSpark::test_transform_query",
"tests/test_parser.py::TestParser::test_alter_set",
"tests/test_parser.py::TestParser::test_collate",
"tests/test_parser.py::TestParser::test_column",
"tests/test_parser.py::TestParser::test_command",
"tests/test_parser.py::TestParser::test_comment_error_n",
"tests/test_parser.py::TestParser::test_comment_error_r",
"tests/test_parser.py::TestParser::test_comments_delete",
"tests/test_parser.py::TestParser::test_comments_delete_cte",
"tests/test_parser.py::TestParser::test_comments_insert",
"tests/test_parser.py::TestParser::test_comments_insert_cte",
"tests/test_parser.py::TestParser::test_comments_select",
"tests/test_parser.py::TestParser::test_comments_select_cte",
"tests/test_parser.py::TestParser::test_comments_update",
"tests/test_parser.py::TestParser::test_comments_update_cte",
"tests/test_parser.py::TestParser::test_create_table_error",
"tests/test_parser.py::TestParser::test_distinct_from",
"tests/test_parser.py::TestParser::test_expression",
"tests/test_parser.py::TestParser::test_float",
"tests/test_parser.py::TestParser::test_identify",
"tests/test_parser.py::TestParser::test_lambda_struct",
"tests/test_parser.py::TestParser::test_missing_by",
"tests/test_parser.py::TestParser::test_multi",
"tests/test_parser.py::TestParser::test_odbc_date_literals",
"tests/test_parser.py::TestParser::test_parameter",
"tests/test_parser.py::TestParser::test_parse_concat_ws",
"tests/test_parser.py::TestParser::test_parse_create_schema",
"tests/test_parser.py::TestParser::test_parse_drop_schema",
"tests/test_parser.py::TestParser::test_parse_empty",
"tests/test_parser.py::TestParser::test_parse_errors",
"tests/test_parser.py::TestParser::test_parse_floats",
"tests/test_parser.py::TestParser::test_parse_intervals",
"tests/test_parser.py::TestParser::test_parse_into",
"tests/test_parser.py::TestParser::test_parse_into_error",
"tests/test_parser.py::TestParser::test_parse_into_errors",
"tests/test_parser.py::TestParser::test_parse_nested",
"tests/test_parser.py::TestParser::test_parse_prop_eq",
"tests/test_parser.py::TestParser::test_parse_properties",
"tests/test_parser.py::TestParser::test_parse_terse_coalesce",
"tests/test_parser.py::TestParser::test_pivot_columns",
"tests/test_parser.py::TestParser::test_pretty_config_override",
"tests/test_parser.py::TestParser::test_select",
"tests/test_parser.py::TestParser::test_set_expression",
"tests/test_parser.py::TestParser::test_space",
"tests/test_parser.py::TestParser::test_structs",
"tests/test_parser.py::TestParser::test_table",
"tests/test_parser.py::TestParser::test_trailing_comments",
"tests/test_parser.py::TestParser::test_transactions",
"tests/test_parser.py::TestParser::test_tuple",
"tests/test_parser.py::TestParser::test_type_literals",
"tests/test_parser.py::TestParser::test_unary_plus",
"tests/test_parser.py::TestParser::test_union",
"tests/test_parser.py::TestParser::test_unnest",
"tests/test_parser.py::TestParser::test_unnest_projection",
"tests/test_parser.py::TestParser::test_values_as_identifier",
"tests/test_parser.py::TestParser::test_var"
] | [] | MIT License | 20,214 | 717 | [
"sqlglot/dialects/databricks.py",
"sqlglot/dialects/spark.py"
] |
|
tobymao__sqlglot-4390 | e7b67e0c280179188ce1bca650735978b758dca1 | 2024-11-14 09:29:07 | 37c4809dfda48224fd982ea8a48d3dbc5c17f9ae | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index a2f118fb..5fa7d1ef 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -1738,9 +1738,13 @@ class Parser(metaclass=_Parser):
concurrently = self._match_text_seq("CONCURRENTLY")
if_exists = exists or self._parse_exists()
- table = self._parse_table_parts(
- schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
- )
+
+ if kind == "COLUMN":
+ this = self._parse_column()
+ else:
+ this = self._parse_table_parts(
+ schema=True, is_db_reference=self._prev.token_type == TokenType.SCHEMA
+ )
cluster = self._parse_on_property() if self._match(TokenType.ON) else None
@@ -1752,7 +1756,7 @@ class Parser(metaclass=_Parser):
return self.expression(
exp.Drop,
exists=if_exists,
- this=table,
+ this=this,
expressions=expressions,
kind=self.dialect.CREATABLE_KIND_MAPPING.get(kind) or kind,
temporary=temporary,
| Simple query, wrong tables list
Very simple query, wrong tables list! it considers `activity_id` as a table!
```python
sql='ALTER TABLE ride DROP COLUMN activity_id'
list(sqlglot.parse_one(sql, read='mysql').find_all(sqlglot.exp.Table))
# list(sqlglot.parse_one(sql, read='mysql').find_all(sqlglot.exp.Table))
# 0 = {Table} Table(\n this=Identifier(this=ride, quoted=False))
# 1 = {Table} Table(\n this=Identifier(this=activity_id, quoted=False))
```
| tobymao/sqlglot | diff --git a/tests/test_parser.py b/tests/test_parser.py
index ba1240c4..b60d7193 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -879,3 +879,8 @@ class TestParser(unittest.TestCase):
expr = parse_one(sql)
self.assertIsInstance(expr, exp.Insert)
self.assertIsInstance(expr.expression.expressions[0].expressions[0], cls)
+
+ def test_drop_column(self):
+ ast = parse_one("ALTER TABLE tbl DROP COLUMN col")
+ self.assertEqual(len(list(ast.find_all(exp.Table))), 1)
+ self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 25.30 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@e7b67e0c280179188ce1bca650735978b758dca1#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/test_parser.py::TestParser::test_drop_column"
] | [] | [
"tests/test_parser.py::TestParser::test_alter_set",
"tests/test_parser.py::TestParser::test_collate",
"tests/test_parser.py::TestParser::test_column",
"tests/test_parser.py::TestParser::test_command",
"tests/test_parser.py::TestParser::test_comment_error_n",
"tests/test_parser.py::TestParser::test_comment_error_r",
"tests/test_parser.py::TestParser::test_comments_delete",
"tests/test_parser.py::TestParser::test_comments_delete_cte",
"tests/test_parser.py::TestParser::test_comments_insert",
"tests/test_parser.py::TestParser::test_comments_insert_cte",
"tests/test_parser.py::TestParser::test_comments_select",
"tests/test_parser.py::TestParser::test_comments_select_cte",
"tests/test_parser.py::TestParser::test_comments_update",
"tests/test_parser.py::TestParser::test_comments_update_cte",
"tests/test_parser.py::TestParser::test_create_table_error",
"tests/test_parser.py::TestParser::test_distinct_from",
"tests/test_parser.py::TestParser::test_expression",
"tests/test_parser.py::TestParser::test_float",
"tests/test_parser.py::TestParser::test_identify",
"tests/test_parser.py::TestParser::test_lambda_struct",
"tests/test_parser.py::TestParser::test_missing_by",
"tests/test_parser.py::TestParser::test_multi",
"tests/test_parser.py::TestParser::test_odbc_date_literals",
"tests/test_parser.py::TestParser::test_parameter",
"tests/test_parser.py::TestParser::test_parse_concat_ws",
"tests/test_parser.py::TestParser::test_parse_create_schema",
"tests/test_parser.py::TestParser::test_parse_drop_schema",
"tests/test_parser.py::TestParser::test_parse_empty",
"tests/test_parser.py::TestParser::test_parse_errors",
"tests/test_parser.py::TestParser::test_parse_floats",
"tests/test_parser.py::TestParser::test_parse_intervals",
"tests/test_parser.py::TestParser::test_parse_into",
"tests/test_parser.py::TestParser::test_parse_into_error",
"tests/test_parser.py::TestParser::test_parse_into_errors",
"tests/test_parser.py::TestParser::test_parse_nested",
"tests/test_parser.py::TestParser::test_parse_prop_eq",
"tests/test_parser.py::TestParser::test_parse_properties",
"tests/test_parser.py::TestParser::test_parse_terse_coalesce",
"tests/test_parser.py::TestParser::test_pivot_columns",
"tests/test_parser.py::TestParser::test_pretty_config_override",
"tests/test_parser.py::TestParser::test_select",
"tests/test_parser.py::TestParser::test_set_expression",
"tests/test_parser.py::TestParser::test_space",
"tests/test_parser.py::TestParser::test_structs",
"tests/test_parser.py::TestParser::test_table",
"tests/test_parser.py::TestParser::test_trailing_comments",
"tests/test_parser.py::TestParser::test_transactions",
"tests/test_parser.py::TestParser::test_tuple",
"tests/test_parser.py::TestParser::test_type_literals",
"tests/test_parser.py::TestParser::test_unary_plus",
"tests/test_parser.py::TestParser::test_union",
"tests/test_parser.py::TestParser::test_unnest",
"tests/test_parser.py::TestParser::test_unnest_projection",
"tests/test_parser.py::TestParser::test_values_as_identifier",
"tests/test_parser.py::TestParser::test_var"
] | [] | MIT License | 20,232 | 286 | [
"sqlglot/parser.py"
] |
|
tobymao__sqlglot-4393 | 37c4809dfda48224fd982ea8a48d3dbc5c17f9ae | 2024-11-14 13:28:54 | 37c4809dfda48224fd982ea8a48d3dbc5c17f9ae | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 80bd7bd1..97b87908 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -106,11 +106,14 @@ def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _build_if_from_div0(args: t.List) -> exp.If:
- cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
- exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
+ lhs = exp._wrap(seq_get(args, 0), exp.Binary)
+ rhs = exp._wrap(seq_get(args, 1), exp.Binary)
+
+ cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
+ exp.Is(this=lhs, expression=exp.null()).not_()
)
true = exp.Literal.number(0)
- false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
+ false = exp.Div(this=lhs, expression=rhs)
return exp.If(this=cond, true=true, false=false)
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index 7bc2f6e4..679a844e 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -6972,7 +6972,15 @@ def _combine(
return this
-def _wrap(expression: E, kind: t.Type[Expression]) -> E | Paren:
[email protected]
+def _wrap(expression: None, kind: t.Type[Expression]) -> None: ...
+
+
[email protected]
+def _wrap(expression: E, kind: t.Type[Expression]) -> E | Paren: ...
+
+
+def _wrap(expression: t.Optional[E], kind: t.Type[Expression]) -> t.Optional[E] | Paren:
return Paren(this=expression) if isinstance(expression, kind) else expression
| `div0()` parsing bug in `parse_one(sql, read=sqlglot.Dialects.SNOWFLAKE)`
The following snippet:
```python
import sqlglot
sql = """
select
div0(2 - 1, 1)
;
"""
print(sql)
```
results in:
```sql
select
case when 1 = 0 and not 2 - 1 is null then
0
else
2 - 1 / 1
end
```
which is not correct and results in incorrect calculations because the division operator `/` takes priority. The `else` clause should be `(2 - 1) / 1` instead.
Thanks for looking into it!
https://docs.snowflake.com/en/sql-reference/functions/div0 | tobymao/sqlglot | diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index e2db661e..515a07c4 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -605,6 +605,17 @@ WHERE
"duckdb": "CASE WHEN bar = 0 AND NOT foo IS NULL THEN 0 ELSE foo / bar END",
},
)
+ self.validate_all(
+ "DIV0(a - b, c - d)",
+ write={
+ "snowflake": "IFF((c - d) = 0 AND NOT (a - b) IS NULL, 0, (a - b) / (c - d))",
+ "sqlite": "IIF((c - d) = 0 AND NOT (a - b) IS NULL, 0, CAST((a - b) AS REAL) / (c - d))",
+ "presto": "IF((c - d) = 0 AND NOT (a - b) IS NULL, 0, CAST((a - b) AS DOUBLE) / (c - d))",
+ "spark": "IF((c - d) = 0 AND NOT (a - b) IS NULL, 0, (a - b) / (c - d))",
+ "hive": "IF((c - d) = 0 AND NOT (a - b) IS NULL, 0, (a - b) / (c - d))",
+ "duckdb": "CASE WHEN (c - d) = 0 AND NOT (a - b) IS NULL THEN 0 ELSE (a - b) / (c - d) END",
+ },
+ )
self.validate_all(
"ZEROIFNULL(foo)",
write={
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 25.30 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@37c4809dfda48224fd982ea8a48d3dbc5c17f9ae#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] | [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_alter_set_unset",
"tests/dialects/test_snowflake.py::TestSnowflake::test_copy",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_from_changes",
"tests/dialects/test_snowflake.py::TestSnowflake::test_grant",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_querying_semi_structured_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_users",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_views",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] | [] | MIT License | 20,233 | 543 | [
"sqlglot/dialects/snowflake.py",
"sqlglot/expressions.py"
] |
|
camptocamp__python-geoservercloud-52 | 3df141679ce7f33851294ff1348fa113ec62d304 | 2024-11-18 09:26:31 | 8500c2749001d5f130451a908e466a7109849ee0 | diff --git a/geoservercloud/models/featuretype.py b/geoservercloud/models/featuretype.py
index 2b761d4..d117d47 100644
--- a/geoservercloud/models/featuretype.py
+++ b/geoservercloud/models/featuretype.py
@@ -41,7 +41,7 @@ class FeatureType(EntityModel):
namespace_name: str | None = None,
title: dict[str, str] | str | None = None,
abstract: dict[str, str] | str | None = None,
- keywords: list[str] | None = [],
+ keywords: list[str] | None = None,
native_bounding_box: dict[str, Any] | None = None,
lat_lon_bounding_box: dict[str, Any] | None = None,
attributes: list[dict[str, Any]] | None = None,
@@ -130,7 +130,6 @@ class FeatureType(EntityModel):
title=title,
abstract=abstract,
srs=feature_type["srs"],
- keywords=feature_type["keywords"]["string"],
attributes=feature_type["attributes"]["attribute"],
metadata_links=metadata_links,
enabled=feature_type["enabled"],
@@ -142,6 +141,7 @@ class FeatureType(EntityModel):
advertised=feature_type.get("advertised"),
native_bounding_box=feature_type.get("nativeBoundingBox"),
lat_lon_bounding_box=feature_type.get("latLonBoundingBox"),
+ keywords=feature_type.get("keywords", {}).get("string", []),
encode_measures=feature_type.get("encodeMeasures"),
forced_decimals=feature_type.get("forcedDecimals"),
simple_conversion_enabled=feature_type.get("simpleConversionEnabled"),
@@ -194,8 +194,10 @@ class FeatureType(EntityModel):
def post_payload(self) -> dict[str, Any]:
content = self.asdict()
- content["attributes"] = {"attribute": self.attributes}
- content["keywords"] = {"string": self.keywords}
+ if self.attributes is not None:
+ content["attributes"] = {"attribute": self.attributes}
+ if self.keywords is not None:
+ content["keywords"] = {"string": self.keywords}
return {"featureType": content}
def put_payload(self) -> dict[str, Any]:
| Add support for FeatureType without `keyword`
```
File ~/code/python-geoservercloud/geoservercloud/models/featuretype.py:133, in FeatureType.from_get_response_payload(cls, content)
121 else:
122 metadata_links = None
124 return cls(
125 namespace_name=feature_type["namespace"]["name"],
126 name=feature_type["name"],
127 native_name=feature_type["nativeName"],
128 workspace_name=workspace_name,
129 store_name=store_name,
130 title=title,
131 abstract=abstract,
132 srs=feature_type["srs"],
--> 133 keywords=feature_type["keywords"]["string"],
134 attributes=feature_type["attributes"]["attribute"],
135 metadata_links=metadata_links,
136 enabled=feature_type["enabled"],
137 circular_arc_present=feature_type["circularArcPresent"],
138 overriding_service_srs=feature_type["overridingServiceSRS"],
139 pad_with_zeros=feature_type["padWithZeros"],
140 projection_policy=feature_type["projectionPolicy"],
141 service_configuration=feature_type["serviceConfiguration"],
142 advertised=feature_type.get("advertised"),
143 encode_measures=feature_type.get("encodeMeasures"),
144 forced_decimals=feature_type.get("forcedDecimals"),
145 simple_conversion_enabled=feature_type.get("simpleConversionEnabled"),
146 skip_number_match=feature_type.get("skipNumberMatch"),
147 )
KeyError: 'keywords'
``` | camptocamp/python-geoservercloud | diff --git a/tests/models/test_featuretype.py b/tests/models/test_featuretype.py
index 100a112..b1ce5f3 100644
--- a/tests/models/test_featuretype.py
+++ b/tests/models/test_featuretype.py
@@ -252,3 +252,29 @@ def test_featuretype_repr():
expected_repr = json.dumps(feature_type.post_payload(), indent=4)
assert repr(feature_type) == expected_repr
+
+
+def test_featuretype_create_no_keyword():
+ feature_type = FeatureType(
+ namespace_name="test_namespace",
+ workspace_name="test_workspace",
+ store_name="test_store",
+ name="test_name",
+ native_name="test_native_name",
+ keywords=None,
+ )
+
+ assert feature_type.post_payload()["featureType"].get("keywords") is None
+
+ feature_type = FeatureType(
+ namespace_name="test_namespace",
+ workspace_name="test_workspace",
+ store_name="test_store",
+ name="test_name",
+ native_name="test_native_name",
+ keywords=[],
+ )
+
+ assert (
+ feature_type.post_payload()["featureType"].get("keywords").get("string") == []
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"ci/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
build==1.2.2.post1
c2cciutils==1.7.0
CacheControl==0.14.2
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.1
cleo==2.1.0
crashtest==0.4.1
cryptography==44.0.2
debian_inspector==31.1.0
defusedxml==0.7.1
distlib==0.3.9
docutils==0.21.2
dulwich==0.21.7
dunamai==1.23.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
filelock==3.18.0
-e git+https://github.com/camptocamp/python-geoservercloud.git@3df141679ce7f33851294ff1348fa113ec62d304#egg=geoservercloud
google-api-core==2.24.2
google-api-python-client==2.166.0
google-auth==2.38.0
google-auth-httplib2==0.2.0
google-auth-oauthlib==1.2.1
googleapis-common-protos==1.69.2
httplib2==0.22.0
id==1.5.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
installer==0.7.0
jaraco.classes==3.4.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==24.3.1
lxml==5.3.1
Markdown==3.7
markdown-it-py==3.0.0
markdown-table==2020.12.3
MarkupSafe==3.0.2
mdurl==0.1.2
more-itertools==10.6.0
msgpack==1.1.0
nh3==0.2.21
nodeenv==1.9.1
oauthlib==3.2.2
OWSLib==0.32.0
packaging==24.2
pexpect==4.9.0
pkginfo==1.12.1.2
platformdirs==4.3.7
pluggy==1.5.0
poetry==1.8.5
poetry-core==1.9.1
poetry-dynamic-versioning==1.4.1
poetry-plugin-drop-python-upper-constraint==0.1.0
poetry-plugin-export==1.8.0
poetry-plugin-tweak-dependencies-version==1.5.2
pre_commit==4.0.1
proto-plus==1.26.1
protobuf==6.30.2
ptyprocess==0.7.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
Pygments==2.19.1
pyparsing==3.2.3
pyproject_hooks==1.2.0
pytest==8.3.3
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
RapidFuzz==3.12.2
readme_renderer==44.0
requests==2.32.3
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
rsa==4.9
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
SecretStorage==3.3.3
security.md==0.2.3
shellingham==1.5.4
six==1.17.0
tomli==2.2.1
tomlkit==0.13.2
trove-classifiers==2025.3.19.19
twine==5.0.0
types-requests==2.32.0.20241016
types-xmltodict==0.14.0.20241009
typing_extensions==4.13.0
uritemplate==4.1.1
urllib3==2.3.0
virtualenv==20.29.3
xmltodict==0.14.2
zipp==3.21.0
| name: python-geoservercloud
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- build==1.2.2.post1
- c2cciutils==1.7.0
- cachecontrol==0.14.2
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.1
- cleo==2.1.0
- crashtest==0.4.1
- cryptography==44.0.2
- debian-inspector==31.1.0
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.21.2
- dulwich==0.21.7
- dunamai==1.23.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- filelock==3.18.0
- geoservercloud==0.0.0
- google-api-core==2.24.2
- google-api-python-client==2.166.0
- google-auth==2.38.0
- google-auth-httplib2==0.2.0
- google-auth-oauthlib==1.2.1
- googleapis-common-protos==1.69.2
- httplib2==0.22.0
- id==1.5.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- installer==0.7.0
- jaraco-classes==3.4.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==24.3.1
- lxml==5.3.1
- markdown==3.7
- markdown-it-py==3.0.0
- markdown-table==2020.12.3
- markupsafe==3.0.2
- mdurl==0.1.2
- more-itertools==10.6.0
- msgpack==1.1.0
- nh3==0.2.21
- nodeenv==1.9.1
- oauthlib==3.2.2
- owslib==0.32.0
- packaging==24.2
- pexpect==4.9.0
- pkginfo==1.12.1.2
- platformdirs==4.3.7
- pluggy==1.5.0
- poetry==1.8.5
- poetry-core==1.9.1
- poetry-dynamic-versioning==1.4.1
- poetry-plugin-drop-python-upper-constraint==0.1.0
- poetry-plugin-export==1.8.0
- poetry-plugin-tweak-dependencies-version==1.5.2
- pre-commit==4.0.1
- proto-plus==1.26.1
- protobuf==6.30.2
- ptyprocess==0.7.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-hooks==1.2.0
- pytest==8.3.3
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- rapidfuzz==3.12.2
- readme-renderer==44.0
- requests==2.32.3
- requests-oauthlib==2.0.0
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- rsa==4.9
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- secretstorage==3.3.3
- security-md==0.2.3
- shellingham==1.5.4
- six==1.17.0
- tomli==2.2.1
- tomlkit==0.13.2
- trove-classifiers==2025.3.19.19
- twine==5.0.0
- types-requests==2.32.0.20241016
- types-xmltodict==0.14.0.20241009
- typing-extensions==4.13.0
- uritemplate==4.1.1
- urllib3==2.3.0
- virtualenv==20.29.3
- xmltodict==0.14.2
- zipp==3.21.0
prefix: /opt/conda/envs/python-geoservercloud
| [
"tests/models/test_featuretype.py::test_featuretype_create_no_keyword"
] | [] | [
"tests/models/test_featuretype.py::test_featuretype_initialization",
"tests/models/test_featuretype.py::test_featuretype_post_payload",
"tests/models/test_featuretype.py::test_featuretype_create_metadata_link",
"tests/models/test_featuretype.py::test_featuretype_from_get_response_payload",
"tests/models/test_featuretype.py::test_featuretype_repr"
] | [] | BSD 2-Clause "Simplified" License | 20,256 | 522 | [
"geoservercloud/models/featuretype.py"
] |
|
tobymao__sqlglot-4415 | 122ef5f41c4e29347026a81e6f6460ccf8e910ed | 2024-11-18 10:56:18 | a2bde2e03e9ef8650756bf304db35b4876746d1f | diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 5fa7d1ef..6e7b21a6 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -5109,9 +5109,8 @@ class Parser(metaclass=_Parser):
else:
field = self._parse_field(any_token=True, anonymous_func=True)
- if isinstance(field, exp.Func) and this:
- # bigquery allows function calls like x.y.count(...)
- # SAFE.SUBSTR(...)
+ if isinstance(field, (exp.Func, exp.Window)) and this:
+ # BQ & snowflake allow function calls like x.y.count(...), SAFE.SUBSTR(...) etc
# https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-reference#function_call_rules
this = exp.replace_tree(
this,
@@ -5135,6 +5134,11 @@ class Parser(metaclass=_Parser):
db=this.args.get("table"),
catalog=this.args.get("db"),
)
+ elif isinstance(field, exp.Window):
+ # Move the exp.Dot's to the window's function
+ window_func = self.expression(exp.Dot, this=this, expression=field.this)
+ field.set("this", window_func)
+ this = field
else:
this = self.expression(exp.Dot, this=this, expression=field)
| User defined function recognized as column (Snowflake dialect)
This query:
`select COL1,COL2 from some_table,TABLE(SOME_DB.SOME_SCHEMA.TABLE_FUNC(value1, value2) over (PARTITION BY value1))`
`p = sqlglot.parse_one(query, dialect=sqlglot.Dialects.SNOWFLAKE)`
creates this ast:
`Select(
expressions=[
Column(
this=Identifier(this=COL1, quoted=False)),
Column(
this=Identifier(this=COL2, quoted=False))],
from=From(
this=Table(
this=Identifier(this=some_table, quoted=False))),
joins=[
Join(
this=Table(
this=Anonymous(
this=TABLE,
expressions=[
Column(
this=Window(
this=Anonymous(
this=TABLE_FUNC,
expressions=[
Column(
this=Identifier(this=value1, quoted=False)),
Column(
this=Identifier(this=value2, quoted=False))]),
partition_by=[
Column(
this=Identifier(this=value1, quoted=False))],
over=OVER),
table=Identifier(this=SOME_SCHEMA, quoted=False),
db=Identifier(this=SOME_DB, quoted=False))])))])`
Inside the anonymous TABLE (which is also a function that returns values as a table you can select on) we can see the expressions contain only one expression which should be another function but is recognized as a Column.
Verified this still happens on 25.31.4.
dialect: Snowflake | tobymao/sqlglot | diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 515a07c4..157947df 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -2250,3 +2250,13 @@ SINGLE = TRUE""",
self.validate_identity(
"GRANT ALL PRIVILEGES ON FUNCTION mydb.myschema.ADD5(number) TO ROLE analyst"
)
+
+ def test_window_function_arg(self):
+ query = "SELECT * FROM TABLE(db.schema.FUNC(a) OVER ())"
+
+ ast = self.parse_one(query)
+ window = ast.find(exp.Window)
+
+ self.assertEqual(ast.sql("snowflake"), query)
+ self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
+ self.assertEqual(window.this.sql("snowflake"), "db.schema.FUNC(a)")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 25.31 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@122ef5f41c4e29347026a81e6f6460ccf8e910ed#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_snowflake.py::TestSnowflake::test_window_function_arg"
] | [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_alter_set_unset",
"tests/dialects/test_snowflake.py::TestSnowflake::test_copy",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_from_changes",
"tests/dialects/test_snowflake.py::TestSnowflake::test_grant",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_querying_semi_structured_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_users",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_views",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values"
] | [] | MIT License | 20,258 | 322 | [
"sqlglot/parser.py"
] |
|
OCHA-DAP__hdx-python-country-63 | fa69943a6906c131d23bbb9926c8845e920b3106 | 2024-11-19 10:49:28 | fa69943a6906c131d23bbb9926c8845e920b3106 | diff --git a/src/hdx/location/country.py b/src/hdx/location/country.py
index dbdb239..b4e7500 100755
--- a/src/hdx/location/country.py
+++ b/src/hdx/location/country.py
@@ -212,7 +212,7 @@ class Country:
@classmethod
def countriesdata(
cls,
- use_live: bool = _use_live,
+ use_live: bool = None,
country_name_overrides: Dict = None,
country_name_mappings: Dict = None,
) -> List[Dict[str, Dict]]:
@@ -227,6 +227,8 @@ class Country:
Returns:
List[Dict[str,Dict]]: Countries dictionaries
"""
+ if use_live is None:
+ use_live = cls._use_live
if cls._countriesdata is None:
countries = None
if country_name_overrides is not None:
@@ -326,7 +328,7 @@ class Country:
def get_country_info_from_iso3(
cls,
iso3: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[Dict[str, str]]:
"""Get country information from ISO3 code
@@ -352,7 +354,7 @@ class Country:
def get_country_name_from_iso3(
cls,
iso3: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
formal: bool = False,
) -> Optional[str]:
@@ -389,7 +391,7 @@ class Country:
def get_currency_from_iso3(
cls,
iso3: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[int]:
"""Get currency code from ISO3 code
@@ -415,7 +417,7 @@ class Country:
def get_iso2_from_iso3(
cls,
iso3: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get ISO2 from ISO3 code
@@ -441,7 +443,7 @@ class Country:
def get_iso3_from_iso2(
cls,
iso2: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get ISO3 from ISO2 code
@@ -467,7 +469,7 @@ class Country:
def get_country_info_from_iso2(
cls,
iso2: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[Dict[str, str]]:
"""Get country name from ISO2 code
@@ -493,7 +495,7 @@ class Country:
def get_country_name_from_iso2(
cls,
iso2: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
formal: bool = False,
) -> Optional[str]:
@@ -521,7 +523,7 @@ class Country:
def get_currency_from_iso2(
cls,
iso2: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get currency from ISO2 code
@@ -547,7 +549,7 @@ class Country:
def get_m49_from_iso3(
cls,
iso3: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[int]:
"""Get M49 from ISO3 code
@@ -573,7 +575,7 @@ class Country:
def get_iso3_from_m49(
cls,
m49: int,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get ISO3 from M49 code
@@ -599,7 +601,7 @@ class Country:
def get_country_info_from_m49(
cls,
m49: int,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[Dict[str, str]]:
"""Get country name from M49 code
@@ -623,7 +625,7 @@ class Country:
def get_country_name_from_m49(
cls,
m49: int,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
formal: bool = False,
) -> Optional[str]:
@@ -651,7 +653,7 @@ class Country:
def get_currency_from_m49(
cls,
m49: int,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get currency from M49 code
@@ -754,7 +756,7 @@ class Country:
def get_iso3_country_code(
cls,
country: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> Optional[str]:
"""Get ISO3 code for cls. Only exact matches or None are returned.
@@ -814,7 +816,7 @@ class Country:
def get_iso3_country_code_fuzzy(
cls,
country: str,
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
min_chars: int = 5,
) -> Tuple[Optional[str], bool]:
@@ -904,7 +906,7 @@ class Country:
def get_countries_in_region(
cls,
region: Union[int, str],
- use_live: bool = _use_live,
+ use_live: bool = None,
exception: Optional[ExceptionUpperBound] = None,
) -> List[str]:
"""Get countries (ISO3 codes) in region
| set_use_live_default not working as expected
I have called `Country.set_use_live_default(False)` but when calling any method to retrieve data, it's still using the live data. It seems to be setting `cls._use_live` correctly but the method param default is not picking this value up.
I will submit a PR for a fix. | OCHA-DAP/hdx-python-country | diff --git a/tests/hdx/location/test_country.py b/tests/hdx/location/test_country.py
index ad4215a..803d30f 100755
--- a/tests/hdx/location/test_country.py
+++ b/tests/hdx/location/test_country.py
@@ -696,8 +696,17 @@ class TestCountry:
assert Country._use_live is True
Country.set_use_live_default(False)
assert Country._use_live is False
+ # We should now be able to load from local data without setting use_live=False
+ Country._countriesdata = None
+ Country.set_ocha_path(
+ script_dir_plus_file("Countries_UZB_Deleted.csv", TestCountry)
+ )
+ assert Country.get_iso3_country_code("UZBEKISTAN") is None
Country.set_use_live_default(None)
assert Country._use_live is True
+ Country._countriesdata = None
+ assert Country.get_iso3_country_code("UZBEKISTAN") == "UZB"
+ Country._countriesdata = None
def test_ocha_feed_file_working(self):
countries = hxl.data(
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 3.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | annotated-types==0.7.0
attrs==24.2.0
certifi==2024.8.30
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.0
click==8.1.7
coverage==7.6.7
distlib==0.3.9
et_xmlfile==2.0.0
exceptiongroup==1.2.2
filelock==3.16.1
frictionless==5.18.0
-e git+https://github.com/OCHA-DAP/hdx-python-country.git@fa69943a6906c131d23bbb9926c8845e920b3106#egg=hdx_python_country
hdx-python-utilities==3.7.4
humanize==4.11.0
identify==2.6.2
idna==3.10
ijson==3.3.0
iniconfig==2.0.0
isodate==0.7.2
Jinja2==3.1.4
jsonlines==4.0.0
jsonpath-ng==1.7.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
libhxl==5.2.2
loguru==0.7.2
markdown-it-py==3.0.0
marko==2.1.2
MarkupSafe==3.0.2
mdurl==0.1.2
nodeenv==1.9.1
openpyxl==3.1.5
packaging==24.2
petl==1.7.15
platformdirs==4.3.6
pluggy==1.5.0
ply==3.11
pre_commit==4.0.1
pydantic==2.9.2
pydantic_core==2.23.4
Pygments==2.18.0
pyphonetics==0.5.3
pytest==8.3.3
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
python-io-wrapper==0.3.1
python-slugify==8.0.4
PyYAML==6.0.2
ratelimit==2.2.1
referencing==0.35.1
requests==2.32.3
requests-file==2.1.0
rfc3986==2.0.0
rich==13.9.4
rpds-py==0.21.0
ruamel.yaml==0.18.6
ruamel.yaml.clib==0.2.12
shellingham==1.5.4
simpleeval==1.0.3
six==1.16.0
stringcase==1.2.0
structlog==24.4.0
tableschema-to-template==0.0.13
tabulate==0.9.0
tenacity==9.0.0
text-unidecode==1.3
tomli==2.2.1
typer==0.13.0
typing_extensions==4.12.2
Unidecode==1.3.8
urllib3==2.2.3
validators==0.34.0
virtualenv==20.27.1
xlrd==2.0.1
xlrd3==1.1.0
xlsx2csv==0.8.3
XlsxWriter==3.2.0
xlwt==1.3.0
| name: hdx-python-country
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- annotated-types==0.7.0
- attrs==24.2.0
- certifi==2024.8.30
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.0
- click==8.1.7
- coverage==7.6.7
- distlib==0.3.9
- et-xmlfile==2.0.0
- exceptiongroup==1.2.2
- filelock==3.16.1
- frictionless==5.18.0
- hdx-python-country==3.8.4
- hdx-python-utilities==3.7.4
- humanize==4.11.0
- identify==2.6.2
- idna==3.10
- ijson==3.3.0
- iniconfig==2.0.0
- isodate==0.7.2
- jinja2==3.1.4
- jsonlines==4.0.0
- jsonpath-ng==1.7.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- libhxl==5.2.2
- loguru==0.7.2
- markdown-it-py==3.0.0
- marko==2.1.2
- markupsafe==3.0.2
- mdurl==0.1.2
- nodeenv==1.9.1
- openpyxl==3.1.5
- packaging==24.2
- petl==1.7.15
- platformdirs==4.3.6
- pluggy==1.5.0
- ply==3.11
- pre-commit==4.0.1
- pydantic==2.9.2
- pydantic-core==2.23.4
- pygments==2.18.0
- pyphonetics==0.5.3
- pytest==8.3.3
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- python-io-wrapper==0.3.1
- python-slugify==8.0.4
- pyyaml==6.0.2
- ratelimit==2.2.1
- referencing==0.35.1
- requests==2.32.3
- requests-file==2.1.0
- rfc3986==2.0.0
- rich==13.9.4
- rpds-py==0.21.0
- ruamel-yaml==0.18.6
- ruamel-yaml-clib==0.2.12
- shellingham==1.5.4
- simpleeval==1.0.3
- six==1.16.0
- stringcase==1.2.0
- structlog==24.4.0
- tableschema-to-template==0.0.13
- tabulate==0.9.0
- tenacity==9.0.0
- text-unidecode==1.3
- tomli==2.2.1
- typer==0.13.0
- typing-extensions==4.12.2
- unidecode==1.3.8
- urllib3==2.2.3
- validators==0.34.0
- virtualenv==20.27.1
- wheel==0.45.0
- xlrd==2.0.1
- xlrd3==1.1.0
- xlsx2csv==0.8.3
- xlsxwriter==3.2.0
- xlwt==1.3.0
prefix: /opt/conda/envs/hdx-python-country
| [
"tests/hdx/location/test_country.py::TestCountry::test_get_country_info_from_iso3",
"tests/hdx/location/test_country.py::TestCountry::test_get_country_info_from_iso2",
"tests/hdx/location/test_country.py::TestCountry::test_get_country_info_from_m49",
"tests/hdx/location/test_country.py::TestCountry::test_use_live_default"
] | [] | [
"tests/hdx/location/test_country.py::TestCountry::test_get_country_name_from_iso3",
"tests/hdx/location/test_country.py::TestCountry::test_get_iso2_from_iso3",
"tests/hdx/location/test_country.py::TestCountry::test_get_iso3_from_iso2",
"tests/hdx/location/test_country.py::TestCountry::test_get_currency_from_iso3",
"tests/hdx/location/test_country.py::TestCountry::test_get_country_name_from_iso2",
"tests/hdx/location/test_country.py::TestCountry::test_get_currency_from_iso2",
"tests/hdx/location/test_country.py::TestCountry::test_get_m49_from_iso3",
"tests/hdx/location/test_country.py::TestCountry::test_get_iso3_from_m49",
"tests/hdx/location/test_country.py::TestCountry::test_get_country_name_from_m49",
"tests/hdx/location/test_country.py::TestCountry::test_get_currency_from_m49",
"tests/hdx/location/test_country.py::TestCountry::test_expand_countryname_abbrevs",
"tests/hdx/location/test_country.py::TestCountry::test_simplify_countryname",
"tests/hdx/location/test_country.py::TestCountry::test_get_iso3_country_code",
"tests/hdx/location/test_country.py::TestCountry::test_get_countries_in_region",
"tests/hdx/location/test_country.py::TestCountry::test_ocha_feed_file_working",
"tests/hdx/location/test_country.py::TestCountry::test_ocha_feed_local_file_working"
] | [] | MIT License | 20,263 | 1,565 | [
"src/hdx/location/country.py"
] |
|
tobymao__sqlglot-4430 | fd81f1bfee9a566b8df8bb501828c20bd72ac481 | 2024-11-20 09:51:37 | a2bde2e03e9ef8650756bf304db35b4876746d1f | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 65ff1b22..b7802f89 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -534,6 +534,7 @@ class BigQuery(Dialect):
**parser.Parser.FUNCTION_PARSERS,
"ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
"MAKE_INTERVAL": lambda self: self._parse_make_interval(),
+ "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
}
FUNCTION_PARSERS.pop("TRIM")
@@ -764,7 +765,7 @@ class BigQuery(Dialect):
return unnest
- def _parse_make_interval(self):
+ def _parse_make_interval(self) -> exp.MakeInterval:
expr = exp.MakeInterval()
for arg_key in expr.arg_types:
@@ -784,6 +785,23 @@ class BigQuery(Dialect):
return expr
+ def _parse_features_at_time(self) -> exp.FeaturesAtTime:
+ expr = self.expression(
+ exp.FeaturesAtTime,
+ this=(self._match(TokenType.TABLE) and self._parse_table())
+ or self._parse_select(nested=True),
+ )
+
+ while self._match(TokenType.COMMA):
+ arg = self._parse_lambda()
+
+ # Get the LHS of the Kwarg and set the arg to that value, e.g
+ # "num_rows => 1" sets the expr's `num_rows` arg
+ if arg:
+ expr.set(arg.this.name, arg)
+
+ return expr
+
class Generator(generator.Generator):
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index a69e8362..ebbc94d9 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -5772,6 +5772,10 @@ class FromBase64(Func):
pass
+class FeaturesAtTime(Func):
+ arg_types = {"this": True, "time": False, "num_rows": False, "ignore_feature_nulls": False}
+
+
class ToBase64(Func):
pass
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index ae9dc35e..83222cdc 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -4543,3 +4543,16 @@ class Generator(metaclass=_Generator):
value = self.sql(expression, "expression")
value = f" {value}" if value else ""
return f"{this}{value}"
+
+ def featuresattime_sql(self, expression: exp.FeaturesAtTime) -> str:
+ this_sql = self.sql(expression, "this")
+ if isinstance(expression.this, exp.Table):
+ this_sql = f"TABLE {this_sql}"
+
+ return self.func(
+ "FEATURES_AT_TIME",
+ this_sql,
+ expression.args.get("time"),
+ expression.args.get("num_rows"),
+ expression.args.get("ignore_feature_nulls"),
+ )
| Missing Support : Google BigQuery ML functions
# Description
I'm encountering an issue with the BigQuery dialect in SQLGlot where it fails to parse queries containing BigQuery ML functions like ML.FEATURES_AT_TIME. The expected behavior is to convert the BigQuery query to an AST, but SQLGlot does not currently support these functions, resulting in a parse failure.
## Details
BigQuery ML provides specialized functions to interact with machine learning models and feature tables. One such function, ML.FEATURES_AT_TIME, is used to retrieve the values of features for entities at a given point in time. However, the BigQuery dialect in SQLGlot does not currently recognize or support this function's syntax, leading to parsing errors.
The specific functions and syntax that are not supported include:
``` ML.FEATURES_AT_TIME Function ```
Syntax:
```
ML.FEATURES_AT_TIME(
TABLE feature_table_name,
time => timestamp_expression,
[num_rows => int_expression],
[ignore_feature_nulls => boolean_expression]
)
```
Parameters:
#### feature_table_name: The name of the feature table (e.g., a table containing entity IDs, feature values, and timestamps).
#### time: A specific point in time for which the feature values are retrieved.
#### num_rows (Optional): The maximum number of rows to return for each entity.
#### ignore_feature_nulls (Optional): A flag to determine if rows with null feature values should be ignored.
Example Query:
```
WITH
feature_table AS (
SELECT * FROM UNNEST(
ARRAY<STRUCT<entity_id STRING, f_1 FLOAT64, f_2 FLOAT64, feature_timestamp TIMESTAMP>>[
('id1', 1.0, 1.0, TIMESTAMP '2022-06-10 12:00:00+00'),
('id2', 12.0, 24.0, TIMESTAMP '2022-06-11 12:00:00+00'),
('id1', 11.0, NULL, TIMESTAMP '2022-06-11 12:00:00+00'),
('id1', 6.0, 12.0, TIMESTAMP '2022-06-11 10:00:00+00'),
('id2', 2.0, 4.0, TIMESTAMP '2022-06-10 12:00:00+00'),
('id2', 7.0, NULL, TIMESTAMP '2022-06-11 10:00:00+00')])
)
SELECT *
FROM
ML.FEATURES_AT_TIME(
TABLE feature_table,
time => '2022-06-12 10:00:00+00',
num_rows => 1,
ignore_feature_nulls => TRUE);
```
## Use Case:
#### This function allows feature engineering for machine learning models by fetching specific feature values at a given timestamp for various entities.
### Expected Behavior
#### SQLGlot should parse BigQuery queries containing ML.FEATURES_AT_TIME without errors.
#### The resulting AST should represent the query structure, enabling transformations, validation, or migrations.
### Actual Behavior
#### SQLGlot fails to recognize ML.FEATURES_AT_TIME, resulting in a parsing error.
#### Steps to Reproduce
##### Write a BigQuery query with the ML.FEATURES_AT_TIME function as shown in the example above.
##### Attempt to parse it using SQLGlot's BigQuery dialect.
##### Observe the failure in parsing the query.
##### Proposed Solution
##### To address this limitation:
#### Extend SQLGlot's BigQuery dialect to include parsing rules for ML.FEATURES_AT_TIME.
#### Define a custom expression in SQLGlot for this function.
#### Update the generator to ensure proper SQL generation from the AST. | tobymao/sqlglot | diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index f8813855..f80c3ac5 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -1640,6 +1640,11 @@ WHERE
},
)
+ self.validate_identity(
+ "SELECT * FROM ML.FEATURES_AT_TIME(TABLE mydataset.feature_table, time => '2022-06-11 10:00:00+00', num_rows => 1, ignore_feature_nulls => TRUE)"
+ )
+ self.validate_identity("SELECT * FROM ML.FEATURES_AT_TIME((SELECT 1), num_rows => 1)")
+
def test_errors(self):
with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 25.31 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@fd81f1bfee9a566b8df8bb501828c20bd72ac481#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery"
] | [] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_convert",
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_format_temporal",
"tests/dialects/test_bigquery.py::TestBigQuery::test_gap_fill",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_inline_constructor",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract_array",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract_scalar",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_mod",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_null_ordering",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_range_type",
"tests/dialects/test_bigquery.py::TestBigQuery::test_regexp_extract",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unix_seconds",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unnest",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings"
] | [] | MIT License | 20,275 | 789 | [
"sqlglot/dialects/bigquery.py",
"sqlglot/expressions.py",
"sqlglot/generator.py"
] |
|
camptocamp__python-geoservercloud-58 | 8500c2749001d5f130451a908e466a7109849ee0 | 2024-11-20 13:34:54 | 8500c2749001d5f130451a908e466a7109849ee0 | diff --git a/geoservercloud/geoservercloud.py b/geoservercloud/geoservercloud.py
index 7c1fea6..52a5b10 100644
--- a/geoservercloud/geoservercloud.py
+++ b/geoservercloud/geoservercloud.py
@@ -392,6 +392,16 @@ class GeoServerCloud:
)
return self.rest_service.create_feature_type(feature_type=feature_type)
+ def delete_feature_type(
+ self, workspace_name: str, datastore_name: str, layer_name: str
+ ) -> tuple[str, int]:
+ """
+ Delete a feature type and associated layer
+ """
+ return self.rest_service.delete_feature_type(
+ workspace_name, datastore_name, layer_name
+ )
+
def create_layer_group(
self,
group: str,
diff --git a/geoservercloud/models/featuretype.py b/geoservercloud/models/featuretype.py
index d117d47..ae2d199 100644
--- a/geoservercloud/models/featuretype.py
+++ b/geoservercloud/models/featuretype.py
@@ -108,9 +108,9 @@ class FeatureType(EntityModel):
feature_type = content["featureType"]
workspace_name = feature_type["store"]["name"].split(":")[0]
store_name = feature_type["store"]["name"].split(":")[1]
- title = feature_type.get("title", feature_type.get("internationalTitle"))
+ title = feature_type.get("internationalTitle", feature_type.get("title"))
abstract = feature_type.get(
- "abstract", feature_type.get("internationalAbstract")
+ "internationalAbstract", feature_type.get("abstract")
)
if feature_type.get("metadataLinks"):
metadata_links_payload = feature_type["metadataLinks"]["metadataLink"]
@@ -201,7 +201,13 @@ class FeatureType(EntityModel):
return {"featureType": content}
def put_payload(self) -> dict[str, Any]:
- return self.post_payload()
+ content = self.post_payload()
+ # Force a null value on non-i18ned attributes, otherwise GeoServer sets it to the first i18n value
+ if content["featureType"].get("internationalTitle"):
+ content["featureType"]["title"] = None
+ if content["featureType"].get("internationalAbstract"):
+ content["featureType"]["abstract"] = None
+ return content
def __repr__(self):
return json.dumps(self.post_payload(), indent=4)
diff --git a/geoservercloud/services/restservice.py b/geoservercloud/services/restservice.py
index 9c5fd37..1ebdc87 100644
--- a/geoservercloud/services/restservice.py
+++ b/geoservercloud/services/restservice.py
@@ -288,6 +288,15 @@ class RestService:
)
return response.content.decode(), response.status_code
+ def delete_feature_type(
+ self, workspace_name: str, datastore_name: str, layer_name: str
+ ) -> tuple[str, int]:
+ response: Response = self.rest_client.delete(
+ self.rest_endpoints.featuretype(workspace_name, datastore_name, layer_name),
+ params={"recurse": "true"},
+ )
+ return response.content.decode(), response.status_code
+
def create_layer_group(
self,
group: str,
| Handle default value of i18n attribute on PUT
When doing a PUT on an object with an i18n attribute (for instance `internationalTitle` in a feature type) it seems GeoServer will takes the first of the translation values and set it as the non-i18ned value (for instance `title`). This value than acts as a default value in the WebUI and when doing WMS requests without specifying a language. It is a problem when:
- updating the `internationalTitle`, because GeoServer does not update the `title` accordingly so the old value is still there
- copying a layer, because the lib until now always assumed the two attributes were mutually exclusive, so only the `title` is copied and the translations are lost.
See example below:
```
>>> import requests
>>> requests.post(
... "http://localhost:9099/geoserver/rest/workspaces/test/datastores/test/featuretypes.json",
... json={
... "featureType": {
... "name": "situation",
... "nativeName": "situation",
... "internationalTitle": {"fr": "Titre de la couche", "en": "Layer title"},
... }
... },
... auth=("admin", "geoserver"),
... )
<Response [201]>
>>> response = requests.get(
... "http://localhost:9099/geoserver/rest/workspaces/test/datastores/test/featuretypes/situation.json",
... auth=("admin", "geoserver"),
... )
>>> print(response.json().get("featureType").get("title"))
None
>>> requests.put(
... "http://localhost:9099/geoserver/rest/workspaces/test/datastores/test/featuretypes/situation.json",
... json={
... "featureType": {
... "name": "situation",
... "nativeName": "situation",
... "internationalTitle": {"fr": "Titre de la couche", "en": "Layer title"},
... }
... },
... auth=("admin", "geoserver"),
... )
<Response [200]>
>>> response = requests.get(
... "http://localhost:9099/geoserver/rest/workspaces/test/datastores/test/featuretypes/situation.json",
... auth=("admin", "geoserver"),
... )
>>> print(response.json().get("featureType").get("title"))
Titre de la couche
```
The title is `Titre de la couche` instead of `None` | camptocamp/python-geoservercloud | diff --git a/tests/test_feature_type.py b/tests/test_feature_type.py
index a5e41b8..27346cd 100644
--- a/tests/test_feature_type.py
+++ b/tests/test_feature_type.py
@@ -147,6 +147,16 @@ def feature_type_post_payload(
return {"featureType": content}
[email protected](scope="module")
+def feature_type_put_payload(
+ feature_type_post_payload: dict[str, dict[str, Any]]
+) -> dict[str, dict[str, Any]]:
+ content = feature_type_post_payload.copy()
+ content["featureType"]["title"] = None
+ content["featureType"]["abstract"] = None
+ return content
+
+
def test_get_feature_types(
geoserver: GeoServerCloud, feature_types_get_response_payload: dict[str, Any]
) -> None:
@@ -218,7 +228,7 @@ def test_create_feature_type(
def test_update_feature_type(
- geoserver: GeoServerCloud, feature_type_post_payload: dict[str, dict[str, Any]]
+ geoserver: GeoServerCloud, feature_type_put_payload: dict[str, dict[str, Any]]
) -> None:
with responses.RequestsMock() as rsps:
rsps.get(
@@ -227,7 +237,7 @@ def test_update_feature_type(
)
rsps.put(
f"{geoserver.url}/rest/workspaces/{WORKSPACE}/datastores/{STORE}/featuretypes/{LAYER}.json",
- match=[responses.matchers.json_params_matcher(feature_type_post_payload)],
+ match=[responses.matchers.json_params_matcher(feature_type_put_payload)],
status=200,
body=b"",
)
@@ -242,3 +252,19 @@ def test_update_feature_type(
assert content == ""
assert code == 200
+
+
+def test_delete_feature_type(geoserver: GeoServerCloud) -> None:
+ with responses.RequestsMock() as rsps:
+ rsps.delete(
+ f"{geoserver.url}/rest/workspaces/{WORKSPACE}/datastores/{STORE}/featuretypes/{LAYER}.json",
+ status=200,
+ body=b"",
+ match=[responses.matchers.query_param_matcher({"recurse": "true"})],
+ )
+ content, code = geoserver.delete_feature_type(
+ workspace_name=WORKSPACE, datastore_name=STORE, layer_name=LAYER
+ )
+
+ assert content == ""
+ assert code == 200
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "poetry install",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"ci/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
build==1.2.2.post1
c2cciutils==1.7.0
CacheControl==0.14.2
cachetools==5.5.2
certifi==2024.8.30
cffi==1.17.1
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.0
cleo==2.1.0
coverage==7.6.4
crashtest==0.4.1
cryptography==44.0.2
debian_inspector==31.1.0
defusedxml==0.7.1
distlib==0.3.9
docutils==0.21.2
dulwich==0.21.7
dunamai==1.23.1
exceptiongroup==1.2.2
fastjsonschema==2.21.1
filelock==3.18.0
-e git+https://github.com/camptocamp/python-geoservercloud.git@8500c2749001d5f130451a908e466a7109849ee0#egg=geoservercloud
google-api-core==2.24.2
google-api-python-client==2.166.0
google-auth==2.38.0
google-auth-httplib2==0.2.0
google-auth-oauthlib==1.2.1
googleapis-common-protos==1.69.2
httplib2==0.22.0
id==1.5.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.0.0
installer==0.7.0
jaraco.classes==3.4.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==24.3.1
lxml==5.3.0
Markdown==3.7
markdown-it-py==3.0.0
markdown-table==2020.12.3
MarkupSafe==3.0.2
mdurl==0.1.2
more-itertools==10.6.0
msgpack==1.1.0
nh3==0.2.21
nodeenv==1.9.1
oauthlib==3.2.2
OWSLib==0.32.0
packaging==24.1
pexpect==4.9.0
pkginfo==1.12.1.2
platformdirs==4.3.7
pluggy==1.5.0
poetry==1.8.5
poetry-core==1.9.1
poetry-dynamic-versioning==1.4.1
poetry-plugin-drop-python-upper-constraint==0.1.0
poetry-plugin-export==1.8.0
poetry-plugin-tweak-dependencies-version==1.5.2
pre_commit==4.0.1
proto-plus==1.26.1
protobuf==6.30.2
ptyprocess==0.7.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
Pygments==2.19.1
pyparsing==3.2.3
pyproject_hooks==1.2.0
pytest==8.3.3
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
RapidFuzz==3.12.2
readme_renderer==44.0
requests==2.32.3
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
responses==0.25.3
rfc3986==2.0.0
rich==14.0.0
rsa==4.9
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
SecretStorage==3.3.3
security.md==0.2.3
shellingham==1.5.4
six==1.16.0
tomli==2.0.2
tomlkit==0.13.2
trove-classifiers==2025.3.19.19
twine==5.0.0
types-requests==2.32.0.20241016
types-xmltodict==0.14.0.20241009
typing_extensions==4.13.0
uritemplate==4.1.1
urllib3==2.2.3
virtualenv==20.29.3
xmltodict==0.14.2
zipp==3.21.0
| name: python-geoservercloud
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- build==1.2.2.post1
- c2cciutils==1.7.0
- cachecontrol==0.14.2
- cachetools==5.5.2
- certifi==2024.8.30
- cffi==1.17.1
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.0
- cleo==2.1.0
- coverage==7.6.4
- crashtest==0.4.1
- cryptography==44.0.2
- debian-inspector==31.1.0
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.21.2
- dulwich==0.21.7
- dunamai==1.23.1
- exceptiongroup==1.2.2
- fastjsonschema==2.21.1
- filelock==3.18.0
- geoservercloud==0.3.1.dev29
- google-api-core==2.24.2
- google-api-python-client==2.166.0
- google-auth==2.38.0
- google-auth-httplib2==0.2.0
- google-auth-oauthlib==1.2.1
- googleapis-common-protos==1.69.2
- httplib2==0.22.0
- id==1.5.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.0.0
- installer==0.7.0
- jaraco-classes==3.4.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==24.3.1
- lxml==5.3.0
- markdown==3.7
- markdown-it-py==3.0.0
- markdown-table==2020.12.3
- markupsafe==3.0.2
- mdurl==0.1.2
- more-itertools==10.6.0
- msgpack==1.1.0
- nh3==0.2.21
- nodeenv==1.9.1
- oauthlib==3.2.2
- owslib==0.32.0
- packaging==24.1
- pexpect==4.9.0
- pkginfo==1.12.1.2
- platformdirs==4.3.7
- pluggy==1.5.0
- poetry==1.8.5
- poetry-core==1.9.1
- poetry-dynamic-versioning==1.4.1
- poetry-plugin-drop-python-upper-constraint==0.1.0
- poetry-plugin-export==1.8.0
- poetry-plugin-tweak-dependencies-version==1.5.2
- pre-commit==4.0.1
- proto-plus==1.26.1
- protobuf==6.30.2
- ptyprocess==0.7.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-hooks==1.2.0
- pytest==8.3.3
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- rapidfuzz==3.12.2
- readme-renderer==44.0
- requests==2.32.3
- requests-oauthlib==2.0.0
- requests-toolbelt==1.0.0
- responses==0.25.3
- rfc3986==2.0.0
- rich==14.0.0
- rsa==4.9
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- secretstorage==3.3.3
- security-md==0.2.3
- shellingham==1.5.4
- six==1.16.0
- tomli==2.0.2
- tomlkit==0.13.2
- trove-classifiers==2025.3.19.19
- twine==5.0.0
- types-requests==2.32.0.20241016
- types-xmltodict==0.14.0.20241009
- typing-extensions==4.13.0
- uritemplate==4.1.1
- urllib3==2.2.3
- virtualenv==20.29.3
- xmltodict==0.14.2
- zipp==3.21.0
prefix: /opt/conda/envs/python-geoservercloud
| [
"tests/test_feature_type.py::test_update_feature_type",
"tests/test_feature_type.py::test_delete_feature_type"
] | [] | [
"tests/test_feature_type.py::test_get_feature_types",
"tests/test_feature_type.py::test_get_feature_type",
"tests/test_feature_type.py::test_create_feature_type"
] | [] | BSD 2-Clause "Simplified" License | 20,278 | 779 | [
"geoservercloud/geoservercloud.py",
"geoservercloud/models/featuretype.py",
"geoservercloud/services/restservice.py"
] |
|
networkx__networkx-7729 | 9beaf7a0b59fe21775cd93862d9c7b28152a2d8c | 2024-11-21 11:10:10 | b369de6e892c1f0932f6118d322aed55cb0618f6 | nelsonaloysio: Done! Added a new test method to `TestEdgeSubGraph`. Let me know in case anything should be changed. Thanks! | diff --git a/networkx/classes/coreviews.py b/networkx/classes/coreviews.py
index a6e85213f..4769ffa71 100644
--- a/networkx/classes/coreviews.py
+++ b/networkx/classes/coreviews.py
@@ -397,7 +397,11 @@ class FilterMultiInner(FilterAdjacency): # muliedge_seconddict
yield n
def __getitem__(self, nbr):
- if nbr in self._atlas and self.NODE_OK(nbr):
+ if (
+ nbr in self._atlas
+ and self.NODE_OK(nbr)
+ and any(self.EDGE_OK(nbr, key) for key in self._atlas[nbr])
+ ):
def new_node_ok(key):
return self.EDGE_OK(nbr, key)
| `MultiGraph` views return inconsistent `has_edge` results
### Current Behavior
Filtered views of a `MultiGraph`, created with `edge_subgraph`, return inconsistent results from `has_edge`.
### Expected Behavior
Match the same results from a `Graph`: either `True` if the edge exists in the subgraph view, or `False` if not.
### Steps to Reproduce
In case of a `MultiGraph`: checking if the edge `('a', 'c')` exists in the view may return **either `True` or `False`**.
```python
import networkx as nx
G = nx.MultiGraph()
G.add_edges_from([("a", "b"),
("a", "c"), # <-- to be filtered out
("c", "b")])
H = nx.edge_subgraph(G, [("a", "b", 0), ("c", "b", 0)])
print(f"{H.edges()}\n\n"
f"{'c' in H['a']} \t 'c' in H['a']\n"
f"{'c' in list(H['a'])} \t 'c' in list(H['a'])\n\n"
f"{H.has_edge('a', 'c')} \t H.has_edge('a', 'c')\n"
f"{('a', 'c') in H.edges()} \t ('a', 'c') in H.edges()")
# [('a', 'b'), ('b', 'c')]
#
# True H.has_edge('a', 'c') # <-- inconsistent result
# False ('a', 'c') in H.edges()
#
# True 'c' in H['a'] # <-- inconsistent result
# False 'c' in list(H['a'])
```
In case of a `Graph`: checking if the edge `('a', 'c')` exists in the view returns **only `False`** (as expected).
```python
import networkx as nx
G = nx.Graph()
G.add_edges_from([("a", "b"),
("a", "c"),
("c", "b")])
H = nx.edge_subgraph(G, [("a", "b"), ("c", "b")])
print(f"{H.edges()}\n\n"
f"{'c' in H['a']} \t 'c' in H['a']\n"
f"{'c' in list(H['a'])} \t 'c' in list(H['a'])\n\n"
f"{H.has_edge('a', 'c')} \t H.has_edge('a', 'c')\n"
f"{('a', 'c') in H.edges()} \t ('a', 'c') in H.edges()")
# [('a', 'b'), ('b', 'c')]
#
# False H.has_edge('a', 'c')
# False ('a', 'c') in H.edges()
#
# False 'c' in H['a']
# False 'c' in list(H['a'])
```
Note that the same results follow in case of a `DiGraph` vs. a `MultiDiGraph` object.
### Environment
Python version: `3.11.10`
NetworkX version: `3.4.2`
### Additional context
I am not sure if this is a known issue or a side-effect of edge keys in multigraphs, but thought I should report it just in case. | networkx/networkx | diff --git a/networkx/classes/tests/test_subgraphviews.py b/networkx/classes/tests/test_subgraphviews.py
index 73e0fdd2d..7f70b29ce 100644
--- a/networkx/classes/tests/test_subgraphviews.py
+++ b/networkx/classes/tests/test_subgraphviews.py
@@ -360,3 +360,12 @@ class TestEdgeSubGraph:
pytest.raises(nx.NetworkXError, self.H.remove_node, 0)
pytest.raises(nx.NetworkXError, self.H.add_edge, 5, 6)
pytest.raises(nx.NetworkXError, self.H.remove_edge, 0, 1)
+
+ def test_consistent_edges(self):
+ # See gh-7724.
+ """Tests that the subgraph returns consistent filtered edges."""
+ G = nx.MultiDiGraph()
+ G.add_edges_from([("a", "b"), ("a", "c"), ("c", "b")])
+ H = nx.edge_subgraph(G, [("a", "b", 0), ("c", "b", 0)])
+ assert "c" not in H["a"]
+ assert not H.has_edge("a", "c")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[default]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.11",
"reqs_path": [
"requirements/default.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | contourpy==1.3.1
cycler==0.12.1
fonttools==4.56.0
iniconfig==2.1.0
kiwisolver==1.4.8
matplotlib==3.10.1
-e git+https://github.com/networkx/networkx.git@9beaf7a0b59fe21775cd93862d9c7b28152a2d8c#egg=networkx
numpy==2.2.4
packaging==24.2
pandas==2.2.3
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.15.2
six==1.17.0
tzdata==2025.2
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- contourpy==1.3.1
- cycler==0.12.1
- fonttools==4.56.0
- iniconfig==2.1.0
- kiwisolver==1.4.8
- matplotlib==3.10.1
- networkx==3.5rc0.dev0
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.15.2
- six==1.17.0
- tzdata==2025.2
prefix: /opt/conda/envs/networkx
| [
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_consistent_edges"
] | [] | [
"networkx/classes/tests/test_subgraphviews.py::TestSubGraphView::test_hidden_nodes",
"networkx/classes/tests/test_subgraphviews.py::TestSubGraphView::test_hidden_edges",
"networkx/classes/tests/test_subgraphviews.py::TestSubGraphView::test_shown_node",
"networkx/classes/tests/test_subgraphviews.py::TestSubGraphView::test_shown_edges",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_hidden_nodes",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_hidden_edges",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_shown_node",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_shown_edges",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_inoutedges",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_pred",
"networkx/classes/tests/test_subgraphviews.py::TestSubDiGraphView::test_inout_degree",
"networkx/classes/tests/test_subgraphviews.py::TestMultiGraphView::test_hidden_nodes",
"networkx/classes/tests/test_subgraphviews.py::TestMultiGraphView::test_shown_node",
"networkx/classes/tests/test_subgraphviews.py::TestMultiGraphView::test_hidden_edges",
"networkx/classes/tests/test_subgraphviews.py::TestMultiGraphView::test_shown_edges",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_hidden_nodes",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_shown_node",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_inoutedges",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_pred",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_hidden_edges",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_shown_edges",
"networkx/classes/tests/test_subgraphviews.py::TestMultiDiGraphView::test_inout_degree",
"networkx/classes/tests/test_subgraphviews.py::TestInducedSubGraph::test_full_graph",
"networkx/classes/tests/test_subgraphviews.py::TestInducedSubGraph::test_partial_subgraph",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_correct_nodes",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_correct_edges",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_add_node",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_remove_node",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_node_attr_dict",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_edge_attr_dict",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_graph_attr_dict",
"networkx/classes/tests/test_subgraphviews.py::TestEdgeSubGraph::test_readonly"
] | [] | BSD 3-Clause | 20,283 | 191 | [
"networkx/classes/coreviews.py"
] |
NCSU-High-Powered-Rocketry-Club__AirbrakesV2-98 | f0891a6f1fa956c947bfacb671af18d96d3c2aea | 2024-11-21 20:24:54 | 93fc4f2d44da78a74a06d321d5f2e7ef3cafce66 | diff --git a/airbrakes/hardware/servo.py b/airbrakes/hardware/servo.py
index cad697c..07bb2e1 100644
--- a/airbrakes/hardware/servo.py
+++ b/airbrakes/hardware/servo.py
@@ -2,7 +2,6 @@
the airbrakes."""
import threading
-import time
import warnings
import gpiozero
@@ -17,7 +16,7 @@ class Servo:
controlling GPIO pins on the Raspberry Pi.
"""
- __slots__ = ("current_extension", "servo")
+ __slots__ = ("_go_to_max_no_buzz", "_go_to_min_no_buzz", "current_extension", "servo")
def __init__(self, gpio_pin_number: int, pin_factory=None) -> None:
"""
@@ -40,41 +39,53 @@ class Servo:
gpiozero.Device.pin_factory = pin_factory
self.servo = gpiozero.Servo(gpio_pin_number)
+ # We have to use threading to avoid blocking the main thread because our extension methods
+ # need to run at a specific time.
+ self._go_to_max_no_buzz = threading.Timer(SERVO_DELAY, self._set_max_no_buzz)
+ self._go_to_min_no_buzz = threading.Timer(SERVO_DELAY, self._set_min_no_buzz)
def set_extended(self) -> None:
"""
- Extends the servo to the maximum extension.
+ Extends the servo to the maximum extension. Starts a timer to stop the buzzing after the
+ servo reaches the maximum extension.
"""
- # We have to use threading to avoid blocking the main thread because our extension methods
- # sleep
- thread = threading.Thread(target=self._extend_then_no_buzz)
- thread.start()
+ # If we are already going to the minimum extension, we cancel that operation before
+ # extending the servo.
+ self._go_to_min_no_buzz.cancel()
+
+ self._set_extension(ServoExtension.MAX_EXTENSION)
+
+ # Creates a timer to stop the buzzing after the servo reaches the maximum extension
+ self._go_to_max_no_buzz = threading.Timer(SERVO_DELAY, self._set_max_no_buzz)
+ self._go_to_max_no_buzz.start()
def set_retracted(self) -> None:
"""
- Retracts the servo to the minimum extension.
+ Retracts the servo to the minimum extension. Starts a timer to stop the buzzing after the
+ servo reaches the minimum extension.
"""
- thread = threading.Thread(target=self._retract_then_no_buzz)
- thread.start()
+ # If we are already going to the maximum extension, we cancel that operation before
+ # retracting the servo.
+ self._go_to_max_no_buzz.cancel()
- def _extend_then_no_buzz(self) -> None:
+ self._set_extension(ServoExtension.MIN_EXTENSION)
+
+ # Creates a timer to stop the buzzing after the servo reaches the minimum extension
+ self._go_to_min_no_buzz = threading.Timer(SERVO_DELAY, self._set_min_no_buzz)
+ self._go_to_min_no_buzz.start()
+
+ def _set_max_no_buzz(self) -> None:
"""
- Extends the servo then stops buzzing. This extends the servo to the maximum extension,
- waits for the servo to reach the physical end of the air brakes, and then sets its
- extension to its actual extension.
+ Extends the servo to the stop buzz position. This extends the servo such that it reaches
+ the physical end of the airbrakes, and then sets its extension to its actual extension.
"""
- self._set_extension(ServoExtension.MAX_EXTENSION)
- time.sleep(SERVO_DELAY)
self._set_extension(ServoExtension.MAX_NO_BUZZ)
- def _retract_then_no_buzz(self) -> None:
+ def _set_min_no_buzz(self) -> None:
"""
- Retracts the servo then stops buzzing. This retracts the servo to the minimum extension,
- waits for the servo to reach the physical end of the air brakes, and then sets its
- extension to its actual extension.
+ Retracts the servo to the stop buzz position. This retracts the servo such that it reaches
+ the physical start of the airbrakes, and then sets its extension to its actual extension.
"""
- self._set_extension(ServoExtension.MIN_EXTENSION)
- time.sleep(SERVO_DELAY)
self._set_extension(ServoExtension.MIN_NO_BUZZ)
def _set_extension(self, extension: ServoExtension) -> None:
| Fix controls logic
If we are doing an actual control flight, and our target altitude is quite close to predicted apogee, then our controls logic would subtly fail because of race conditions. Consider this case, when our target altitude is 800m:
1. Predicted apogee is 801m:
- Airbrakes extend to `MAX`. Thread to extend to `MAX_NO_BUZZ` is set to execute in 0.3 seconds.
2. Predicted apogee is 799m, before 0.3 seconds have passed:
- Airbrakes extend to `MIN`. Thread to extend to `MIN_NO_BUZZ` is set to execute in 0.3 seconds.
3. 0.3 seconds from the first extension has passed, airbrakes go to `MAX_NO_BUZZ` (incorrect!)
4. A little bit later, the airbrakes are then extended to `MIN_NO_BUZZ`, as they should have been.
### Proposed Fix
We should use [Timer](https://docs.python.org/3.12/library/threading.html#threading.Timer) objects and cancel the execution of the NO_BUZZ code whenever we are setting the opposite extension.
Tests for the servo are also long overdue.
| NCSU-High-Powered-Rocketry-Club/AirbrakesV2 | diff --git a/tests/conftest.py b/tests/conftest.py
index 57d3a8d..b75ca92 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -62,11 +62,13 @@ def airbrakes(imu, logger, servo, data_processor, apogee_predictor):
@pytest.fixture
def random_data_mock_imu():
+ # A mock IMU that outputs random data packets
return RandomDataIMU(port=PORT, frequency=FREQUENCY)
@pytest.fixture
def idle_mock_imu():
+ # A sleeping IMU that doesn't output any data packets
return IdleIMU(port=PORT, frequency=FREQUENCY)
diff --git a/tests/test_servo.py b/tests/test_servo.py
new file mode 100644
index 0000000..8dfc394
--- /dev/null
+++ b/tests/test_servo.py
@@ -0,0 +1,90 @@
+import threading
+import time
+
+import gpiozero
+
+from airbrakes.hardware.servo import Servo
+from constants import SERVO_DELAY, ServoExtension
+
+
+class TestServo:
+ """Tests the Servo class, which controls the servo that extends and retracts the airbrakes."""
+
+ def test_slots(self, servo):
+ inst = servo
+ for attr in inst.__slots__:
+ assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
+
+ def test_init(self, servo):
+ assert isinstance(servo, Servo)
+ assert isinstance(servo.current_extension, ServoExtension)
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+ assert isinstance(servo.servo, gpiozero.Servo)
+ assert isinstance(servo._go_to_max_no_buzz, threading.Timer)
+ assert isinstance(servo._go_to_min_no_buzz, threading.Timer)
+
+ def test_set_extension(self, servo):
+ servo._set_extension(ServoExtension.MAX_EXTENSION)
+ assert servo.current_extension == ServoExtension.MAX_EXTENSION
+ assert servo.servo.value == ServoExtension.MAX_EXTENSION.value
+ servo._set_extension(ServoExtension.MIN_EXTENSION)
+ assert servo.current_extension == ServoExtension.MIN_EXTENSION
+ assert servo.servo.value == ServoExtension.MIN_EXTENSION.value
+
+ def test_set_extended(self, servo):
+ """Tests that the servo extends to the maximum extension, and that threading is handled
+ correctly."""
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+ servo.set_extended()
+ assert servo.current_extension == ServoExtension.MAX_EXTENSION
+ assert servo.servo.value == ServoExtension.MAX_EXTENSION.value
+ time.sleep(SERVO_DELAY + 0.05)
+ assert servo.current_extension == ServoExtension.MAX_NO_BUZZ
+ assert servo.servo.value == ServoExtension.MAX_NO_BUZZ.value
+
+ def test_set_retracted(self, servo):
+ """Tests that the servo retracts to the minimum extension, and that threading is handled
+ correctly."""
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+ servo.set_retracted()
+ assert servo.current_extension == ServoExtension.MIN_EXTENSION
+ assert servo.servo.value == ServoExtension.MIN_EXTENSION.value
+ time.sleep(SERVO_DELAY + 0.05)
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+ assert servo.servo.value == ServoExtension.MIN_NO_BUZZ.value
+
+ def test_repeated_extension_retraction(self, servo):
+ """Tests that repeatedly extending and retracting the servo works as expected, and has
+ no race conditions with threads."""
+
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+
+ servo.set_extended()
+ assert servo.current_extension == ServoExtension.MAX_EXTENSION
+ assert servo.servo.value == ServoExtension.MAX_EXTENSION.value
+ # Assert that going to min no buzz was cancelled:
+ assert servo._go_to_min_no_buzz.finished.is_set()
+ # Assert that the thread to tell the servo to go to max no buzz has started:
+ assert servo._go_to_max_no_buzz._started.is_set()
+
+ time_taken = SERVO_DELAY / 2 # At 0.15s
+ time.sleep(time_taken)
+ servo.set_retracted()
+ assert servo.current_extension == ServoExtension.MIN_EXTENSION
+ assert servo.servo.value == ServoExtension.MIN_EXTENSION.value
+ # Assert that going to max no buzz was cancelled:
+ assert servo._go_to_max_no_buzz.finished.is_set()
+ # Assert that the thread to tell the servo to go to min no buzz has started:
+ assert servo._go_to_min_no_buzz._started.is_set()
+
+ # At 0.32s, make sure the servo will *not* go to max_no_buzz
+ time_taken = SERVO_DELAY / 2 + 0.02 # The 0.02 is to give the code time to execute:
+ time.sleep(time_taken)
+ assert servo.current_extension == ServoExtension.MIN_EXTENSION
+ assert servo.servo.value == ServoExtension.MIN_EXTENSION.value
+
+ # At 0.45s, make sure the servo will go to min_no_buzz:
+ time_taken = SERVO_DELAY / 2
+ time.sleep(time_taken)
+ assert servo.current_extension == ServoExtension.MIN_NO_BUZZ
+ assert servo.servo.value == ServoExtension.MIN_NO_BUZZ.value
diff --git a/tests/test_state.py b/tests/test_state.py
index 3677f1d..1e1f807 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -1,4 +1,3 @@
-import time
from abc import ABC
import pytest
@@ -18,7 +17,6 @@ from constants import (
LOG_BUFFER_SIZE,
MAX_FREE_FALL_LENGTH,
MAX_VELOCITY_THRESHOLD,
- SERVO_DELAY,
ServoExtension,
)
@@ -40,7 +38,7 @@ def state(airbrakes):
@pytest.fixture
-def stand_by_state(airbrakes):
+def standby_state(airbrakes):
return StandbyState(airbrakes)
@@ -83,8 +81,6 @@ class TestState:
def test_init(self, state, airbrakes):
assert state.context == airbrakes
assert airbrakes.servo.current_extension == ServoExtension.MIN_EXTENSION
- time.sleep(SERVO_DELAY + 0.2) # wait for servo to extend
- assert airbrakes.servo.current_extension == ServoExtension.MIN_NO_BUZZ
assert issubclass(state.__class__, ABC)
def test_name(self, state):
@@ -94,20 +90,17 @@ class TestState:
class TestStandbyState:
"""Tests the StandbyState class"""
- def test_slots(self, stand_by_state):
- inst = stand_by_state
+ def test_slots(self, standby_state):
+ inst = standby_state
for attr in inst.__slots__:
assert getattr(inst, attr, "err") != "err", f"got extra slot '{attr}'"
- def test_init(self, stand_by_state, airbrakes):
- assert stand_by_state.context == airbrakes
- assert airbrakes.servo.current_extension == ServoExtension.MIN_EXTENSION
- time.sleep(SERVO_DELAY + 0.2) # wait for servo to extend
- assert airbrakes.servo.current_extension == ServoExtension.MIN_NO_BUZZ
- assert issubclass(stand_by_state.__class__, State)
+ def test_init(self, standby_state, airbrakes):
+ assert standby_state.context == airbrakes
+ assert issubclass(standby_state.__class__, State)
- def test_name(self, stand_by_state):
- assert stand_by_state.name == "StandbyState"
+ def test_name(self, standby_state):
+ assert standby_state.name == "StandbyState"
@pytest.mark.parametrize(
("current_velocity", "current_altitude", "expected_state"),
@@ -126,11 +119,11 @@ class TestStandbyState:
"high_velocity",
],
)
- def test_update(self, stand_by_state, current_velocity, current_altitude, expected_state):
- stand_by_state.context.data_processor._vertical_velocities = [current_velocity]
- stand_by_state.context.data_processor._current_altitudes = [current_altitude]
- stand_by_state.update()
- assert isinstance(stand_by_state.context.state, expected_state)
+ def test_update(self, standby_state, current_velocity, current_altitude, expected_state):
+ standby_state.context.data_processor._vertical_velocities = [current_velocity]
+ standby_state.context.data_processor._current_altitudes = [current_altitude]
+ standby_state.update()
+ assert isinstance(standby_state.context.state, expected_state)
class TestMotorBurnState:
@@ -143,9 +136,6 @@ class TestMotorBurnState:
def test_init(self, motor_burn_state, airbrakes):
assert motor_burn_state.context == airbrakes
- assert airbrakes.servo.current_extension == ServoExtension.MIN_EXTENSION
- time.sleep(SERVO_DELAY + 0.1) # wait for servo to extend
- assert airbrakes.servo.current_extension == ServoExtension.MIN_NO_BUZZ
assert issubclass(motor_burn_state.__class__, State)
assert motor_burn_state.start_time_ns == 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.12",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/NCSU-High-Powered-Rocketry-Club/AirbrakesV2.git@f0891a6f1fa956c947bfacb671af18d96d3c2aea#egg=AirbrakesV2
cfgv==3.4.0
colorama==0.4.6
colorzero==2.0
coverage==7.8.0
distlib==0.3.9
filelock==3.18.0
gpiozero==2.0.1
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
msgspec==0.19.0
nodeenv==1.9.1
numpy==2.2.4
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pigpio==1.78
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
psutil==7.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.11.2
scipy==1.15.2
setuptools==75.8.0
six==1.17.0
tzdata==2025.2
virtualenv==20.29.3
wheel==0.45.1
| name: AirbrakesV2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- expat=2.6.4=h6a678d5_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py312h06a4308_0
- pip=25.0=py312h06a4308_0
- pluggy=1.5.0=py312h06a4308_0
- pytest=8.3.4=py312h06a4308_0
- python=3.12.9=h5148396_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py312h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py312h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- airbrakesv2==0.1.0
- cfgv==3.4.0
- colorama==0.4.6
- colorzero==2.0
- coverage==7.8.0
- distlib==0.3.9
- filelock==3.18.0
- gpiozero==2.0.1
- identify==2.6.9
- msgspec==0.19.0
- nodeenv==1.9.1
- numpy==2.2.4
- pandas==2.2.3
- pigpio==1.78
- platformdirs==4.3.7
- pre-commit==4.2.0
- psutil==7.0.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.11.2
- scipy==1.15.2
- six==1.17.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/AirbrakesV2
| [
"tests/test_servo.py::TestServo::test_init",
"tests/test_servo.py::TestServo::test_repeated_extension_retraction"
] | [] | [
"tests/test_servo.py::TestServo::test_slots",
"tests/test_servo.py::TestServo::test_set_extension",
"tests/test_servo.py::TestServo::test_set_extended",
"tests/test_servo.py::TestServo::test_set_retracted",
"tests/test_state.py::TestState::test_slots",
"tests/test_state.py::TestState::test_init",
"tests/test_state.py::TestState::test_name",
"tests/test_state.py::TestStandbyState::test_slots",
"tests/test_state.py::TestStandbyState::test_init",
"tests/test_state.py::TestStandbyState::test_name",
"tests/test_state.py::TestStandbyState::test_update[at_launchpad]",
"tests/test_state.py::TestStandbyState::test_update[only_alt_update]",
"tests/test_state.py::TestStandbyState::test_update[slow_alt_update]",
"tests/test_state.py::TestStandbyState::test_update[optimal_condition]",
"tests/test_state.py::TestStandbyState::test_update[high_velocity]",
"tests/test_state.py::TestMotorBurnState::test_slots",
"tests/test_state.py::TestMotorBurnState::test_init",
"tests/test_state.py::TestMotorBurnState::test_name",
"tests/test_state.py::TestMotorBurnState::test_update[at_launchpad]",
"tests/test_state.py::TestMotorBurnState::test_update[motor_burn]",
"tests/test_state.py::TestMotorBurnState::test_update[decreasing_velocity_under_threshold]",
"tests/test_state.py::TestMotorBurnState::test_update[decreasing_velocity_over_threshold]",
"tests/test_state.py::TestCoastState::test_slots",
"tests/test_state.py::TestCoastState::test_init",
"tests/test_state.py::TestCoastState::test_name",
"tests/test_state.py::TestCoastState::test_update_without_controls[climbing]",
"tests/test_state.py::TestCoastState::test_update_without_controls[just_descent]",
"tests/test_state.py::TestCoastState::test_update_without_controls[faulty_speed]",
"tests/test_state.py::TestCoastState::test_update_without_controls[at_apogee]",
"tests/test_state.py::TestCoastState::test_update_with_controls[extend_1]",
"tests/test_state.py::TestCoastState::test_update_with_controls[retract_1]",
"tests/test_state.py::TestCoastState::test_update_with_controls[retract_2]",
"tests/test_state.py::TestCoastState::test_update_with_controls[equal_alt]",
"tests/test_state.py::TestCoastState::test_update_with_controls[extend_2]",
"tests/test_state.py::TestCoastState::test_update_with_controls[extend_3]",
"tests/test_state.py::TestCoastState::test_update_control_only_once",
"tests/test_state.py::TestFreeFallState::test_slots",
"tests/test_state.py::TestFreeFallState::test_init",
"tests/test_state.py::TestFreeFallState::test_name",
"tests/test_state.py::TestFreeFallState::test_update[falling]",
"tests/test_state.py::TestFreeFallState::test_update[almost_landed]",
"tests/test_state.py::TestFreeFallState::test_update[close_to_ground_but_falling]",
"tests/test_state.py::TestFreeFallState::test_update[landed]",
"tests/test_state.py::TestFreeFallState::test_update[slightly_short]",
"tests/test_state.py::TestFreeFallState::test_update[too_long]",
"tests/test_state.py::TestLandedState::test_slots",
"tests/test_state.py::TestLandedState::test_init",
"tests/test_state.py::TestLandedState::test_name",
"tests/test_state.py::TestLandedState::test_update"
] | [] | MIT License | 20,289 | 1,047 | [
"airbrakes/hardware/servo.py"
] |
|
joshtemple__lkml-97 | 3b08d65c6d083dc44648735e96f14a796a84ba13 | 2024-11-21 21:22:33 | 3b08d65c6d083dc44648735e96f14a796a84ba13 | diff --git a/lkml/simple.py b/lkml/simple.py
index 0d3b651..275b85c 100644
--- a/lkml/simple.py
+++ b/lkml/simple.py
@@ -226,7 +226,7 @@ class DictParser:
singular_key == "allowed_value"
and self.parent_key.rstrip("s") == "access_grant"
)
- and not (self.parent_key == "query")
+ and not (self.parent_key == "query" and singular_key != "filters")
)
def resolve_filters(self, values: List[dict]) -> Union[List[BlockNode], ListNode]:
| Simple Parser unnests `dimensions` and `measures` in `query` fields
For [query](https://docs.looker.com/reference/explore-params/query) fields, the dimensions and measures keys are special - they are kept in plural form, with a list as the value.
When the simple parser parses this field, it sees that it's a pluralized key, and unnests them. Example:
```
In [98]: lkml.dump(
...: {
...: "explores": [
...: {
...: "name": "queried_explore",
...: "queries": [
...: {
...: "dimensions": ["first_dim"],
...: "measures": ["first_meas"],
...: "pivots": ["first_pivot"],
...: "name": "aquery"
...: }
...: ]
...: }
...: ]
...: }
...: )
Out[98]: 'explore: queried_explore {\n query: aquery {\n dimension: first_dim\n measure: first_meas\n pivots: [first_pivot]\n }\n}'
``` | joshtemple/lkml | diff --git a/tests/test_simple.py b/tests/test_simple.py
index 2c5a748..be7e84d 100644
--- a/tests/test_simple.py
+++ b/tests/test_simple.py
@@ -268,7 +268,7 @@ def test_parse_query(parser):
{
"name": "query_one",
"dimensions": ["dimension_one", "dimension_two"],
- "measures": ["measure_one"]
+ "measures": ["measure_one"],
}
]
}
@@ -284,6 +284,32 @@ def test_parse_query(parser):
)
+def test_parse_query_with_filters(parser):
+ obj = {
+ "explores": [
+ {
+ "queries": [{"filters__all": [[{"baz": "expression"}, {"qux": "expression"}]], "name": "bar"}],
+ "name": "foo",
+ }
+ ]
+ }
+ node = parser.parse(obj)
+ result = str(node)
+ print(result)
+ assert result == "\n".join(
+ (
+ "explore: foo {",
+ " query: bar {",
+ " filters: [",
+ ' baz: "expression",',
+ ' qux: "expression",',
+ " ]",
+ " }",
+ "}",
+ )
+ )
+
+
def test_resolve_filters_filter_only_field(parser):
nodes = parser.resolve_filters(
[{"name": "filter_a", "type": "string"}, {"name": "filter_b", "type": "number"}]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
black==22.3.0
certifi==2025.1.31
chardet==3.0.4
click==8.1.8
coverage==7.8.0
distlib==0.3.9
filelock==3.18.0
flake8==3.8.3
idna==2.10
iniconfig==2.1.0
isort==4.3.20
-e git+https://github.com/joshtemple/lkml.git@3b08d65c6d083dc44648735e96f14a796a84ba13#egg=lkml
mccabe==0.6.1
more-itertools==10.6.0
mypy==0.782
mypy_extensions==0.4.4
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==0.13.1
py==1.11.0
pycodestyle==2.6.0
pyflakes==2.2.0
pytest==6.0.1
pytest-cov==2.7.1
requests==2.24.0
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.20.1
typed-ast==1.4.3
typing_extensions==4.13.0
urllib3==1.25.11
virtualenv==20.29.3
| name: lkml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- black==22.3.0
- certifi==2025.1.31
- chardet==3.0.4
- click==8.1.8
- coverage==7.8.0
- distlib==0.3.9
- filelock==3.18.0
- flake8==3.8.3
- idna==2.10
- iniconfig==2.1.0
- isort==4.3.20
- lkml==1.3.5
- mccabe==0.6.1
- more-itertools==10.6.0
- mypy==0.782
- mypy-extensions==0.4.4
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==0.13.1
- py==1.11.0
- pycodestyle==2.6.0
- pyflakes==2.2.0
- pytest==6.0.1
- pytest-cov==2.7.1
- requests==2.24.0
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.20.1
- typed-ast==1.4.3
- typing-extensions==4.13.0
- urllib3==1.25.11
- virtualenv==20.29.3
prefix: /opt/conda/envs/lkml
| [
"tests/test_simple.py::test_parse_query_with_filters"
] | [] | [
"tests/test_simple.py::test_parse_token_with_unquoted_literal",
"tests/test_simple.py::test_parse_token_with_quoted_literal",
"tests/test_simple.py::test_parse_pair_with_unquoted_literal",
"tests/test_simple.py::test_parse_pair_with_quoted_literal",
"tests/test_simple.py::test_parse_list_with_unquoted_literals",
"tests/test_simple.py::test_parse_list_with_quoted_literals",
"tests/test_simple.py::test_parse_list_with_many_values",
"tests/test_simple.py::test_parse_list_with_no_values",
"tests/test_simple.py::test_parse_block_with_unquoted_literals",
"tests/test_simple.py::test_parse_block_with_quoted_literals",
"tests/test_simple.py::test_parse_block_with_name",
"tests/test_simple.py::test_parse_block_with_no_fields_and_name",
"tests/test_simple.py::test_parse_block_with_no_fields_and_no_name",
"tests/test_simple.py::test_parse_nested_block",
"tests/test_simple.py::test_parse_any_with_str_value",
"tests/test_simple.py::test_parse_any_with_list_value",
"tests/test_simple.py::test_parse_any_with_dict_value_and_name",
"tests/test_simple.py::test_parse_any_with_dict_value_and_no_name",
"tests/test_simple.py::test_parse_any_raises_with_bad_type",
"tests/test_simple.py::test_expand_list_with_blocks",
"tests/test_simple.py::test_expand_list_with_pairs",
"tests/test_simple.py::test_parse_top_level_pairs",
"tests/test_simple.py::test_parse_query",
"tests/test_simple.py::test_resolve_filters_filter_only_field",
"tests/test_simple.py::test_resolve_filters_legacy_filters",
"tests/test_simple.py::test_resolve_filters_new_filters"
] | [] | MIT License | 20,290 | 155 | [
"lkml/simple.py"
] |
|
zarr-developers__zarr-python-2512 | 501ae9eaf1f5df774b2981db7020a4e6d1490606 | 2024-11-22 03:39:26 | e602aa1d19f26bb06669994231e524c55bcecbeb | diff --git a/src/zarr/core/array.py b/src/zarr/core/array.py
index 71a6f9d3..a6317e7a 100644
--- a/src/zarr/core/array.py
+++ b/src/zarr/core/array.py
@@ -2881,6 +2881,14 @@ class Array:
if hasattr(value, "shape") and len(value.shape) > 1:
value = np.array(value).reshape(-1)
+ if not is_scalar(value, self.dtype) and (
+ isinstance(value, NDArrayLike) and indexer.shape != value.shape
+ ):
+ raise ValueError(
+ f"Attempting to set a selection of {indexer.sel_shape[0]} "
+ f"elements with an array of {value.shape[0]} elements."
+ )
+
sync(self._async_array._set_selection(indexer, value, fields=fields, prototype=prototype))
@_deprecate_positional_args
| Unexpected Vectorized Indexing Setting Behavior
### Zarr version
v3.0.0-beta.1 (main)
### Numcodecs version
0.13.0
### Python Version
3.12
### Operating System
Mac
### Installation
`uv` from repo
### Description
I think this is vectorized indexing...in any case I think that the following example should error since `arr[:][[np.array([1, 2]), np.array([1, 2])]]` is 1d but it allows storing a 2d
### Steps to reproduce
```python
import tempfile
import numpy as np
import zarr
from zarr.storage import LocalStore
shape = (4, 4)
chunks = (2, 2)
fill_value = 32767
tmp = tempfile.TemporaryDirectory()
arr = zarr.create(
shape,
store=LocalStore(root=tmp.name, mode="w"),
chunks=chunks,
dtype=np.int16,
fill_value=fill_value,
codecs=[zarr.codecs.BytesCodec(), zarr.codecs.BloscCodec()],
)
arr[np.array([1, 2]), np.array([1, 2])] = np.array([[-1, -2], [-3, -4]])
print(arr[:])
```
gives
```
[[32767 32767 32767 32767]
[32767 -1 32767 32767]
[32767 32767 -2 32767]
[32767 32767 32767 32767]]
```
which I would not expect - I would probably expect this to error.
### Additional output
_No response_ | zarr-developers/zarr-python | diff --git a/tests/test_indexing.py b/tests/test_indexing.py
index 3dc93ba4..04eb53e3 100644
--- a/tests/test_indexing.py
+++ b/tests/test_indexing.py
@@ -1936,3 +1936,21 @@ def test_zero_sized_chunks(store: StorePath, shape: list[int]) -> None:
z = Array.create(store=store, shape=shape, chunk_shape=shape, zarr_format=3, dtype="f8")
z[...] = 42
assert_array_equal(z[...], np.zeros(shape, dtype="f8"))
+
+
[email protected]("store", ["memory"], indirect=["store"])
+def test_vectorized_indexing_incompatible_shape(store) -> None:
+ # GH2469
+ shape = (4, 4)
+ chunks = (2, 2)
+ fill_value = 32767
+ arr = zarr.create(
+ shape,
+ store=store,
+ chunks=chunks,
+ dtype=np.int16,
+ fill_value=fill_value,
+ codecs=[zarr.codecs.BytesCodec(), zarr.codecs.BloscCodec()],
+ )
+ with pytest.raises(ValueError, match="Attempting to set"):
+ arr[np.array([1, 2]), np.array([1, 2])] = np.array([[-1, -2], [-3, -4]])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"coverage",
"pytest",
"pytest-cov",
"msgpack",
"s3fs",
"pytest-asyncio",
"moto[s3]",
"flask-cors",
"flask",
"requests",
"mypy",
"hypothesis",
"universal-pathlib",
"notebook",
"ipytree>=0.2.2",
"ipywidgets>=8.0.0",
"sphinx==8.1.3",
"sphinx-autobuild>=2021.3.14",
"sphinx-autoapi==3.3.3",
"sphinx_design",
"sphinx-issues",
"sphinx-copybutton",
"pydata-sphinx-theme",
"numpydoc",
"numcodecs[msgpack]",
"msgpack",
"cupy-cuda12x"
],
"pre_install": null,
"python": "3.11",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | accessible-pygments==0.0.5
aiobotocore==2.21.1
aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aioitertools==0.12.0
aiosignal==1.3.2
alabaster==1.0.0
anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
arrow==1.3.0
astroid==3.3.9
asttokens==3.0.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
blinker==1.9.0
boto3==1.37.1
botocore==1.37.1
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
comm==0.2.2
coverage==7.8.0
crc32c==2.7.1
cryptography==44.0.2
cupy-cuda12x==13.4.1
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
Deprecated==1.2.18
docutils==0.21.2
donfig==0.8.1.post1
executing==2.2.0
fastjsonschema==2.21.1
fastrlock==0.8.3
Flask==3.1.0
flask-cors==5.0.1
fqdn==1.5.1
frozenlist==1.5.0
fsspec==2025.3.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
hypothesis==6.130.5
idna==3.10
imagesize==1.4.1
iniconfig==2.1.0
ipykernel==6.29.5
ipython==9.0.2
ipython_pygments_lexers==1.1.1
ipytree==0.2.2
ipywidgets==8.1.5
isoduration==20.11.0
itsdangerous==2.2.0
jedi==0.19.2
Jinja2==3.1.6
jmespath==1.0.1
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.3.6
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mistune==3.1.3
moto==5.1.2
msgpack==1.1.0
multidict==6.2.0
mypy==1.15.0
mypy-extensions==1.0.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nest-asyncio==1.6.0
notebook==7.3.3
notebook_shim==0.2.4
numcodecs==0.15.1
numpy==2.2.4
numpydoc==1.8.0
overrides==7.7.0
packaging==24.2
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
platformdirs==4.3.7
pluggy==1.5.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
propcache==0.3.1
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
py-partiql-parser==0.6.1
pycparser==2.22
pydata-sphinx-theme==0.16.1
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
responses==0.25.7
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
s3fs==2025.3.1
s3transfer==0.11.3
Send2Trash==1.8.3
six==1.17.0
sniffio==1.3.1
snowballstemmer==2.2.0
sortedcontainers==2.4.0
soupsieve==2.6
Sphinx==8.1.3
sphinx-autoapi==3.3.3
sphinx-autobuild==2024.10.3
sphinx-copybutton==0.5.2
sphinx-issues==5.0.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
starlette==0.46.1
tabulate==0.9.0
terminado==0.18.1
tinycss2==1.4.0
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
universal_pathlib==0.2.6
uri-template==1.3.0
urllib3==2.3.0
uvicorn==0.34.0
watchfiles==1.0.4
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
websockets==15.0.1
Werkzeug==3.1.3
widgetsnbextension==4.0.13
wrapt==1.17.2
xmltodict==0.14.2
yarl==1.18.3
-e git+https://github.com/zarr-developers/zarr-python.git@501ae9eaf1f5df774b2981db7020a4e6d1490606#egg=zarr
| name: zarr-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- accessible-pygments==0.0.5
- aiobotocore==2.21.1
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aioitertools==0.12.0
- aiosignal==1.3.2
- alabaster==1.0.0
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- arrow==1.3.0
- astroid==3.3.9
- asttokens==3.0.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- blinker==1.9.0
- boto3==1.37.1
- botocore==1.37.1
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- comm==0.2.2
- coverage==7.8.0
- crc32c==2.7.1
- cryptography==44.0.2
- cupy-cuda12x==13.4.1
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- deprecated==1.2.18
- docutils==0.21.2
- donfig==0.8.1.post1
- executing==2.2.0
- fastjsonschema==2.21.1
- fastrlock==0.8.3
- flask==3.1.0
- flask-cors==5.0.1
- fqdn==1.5.1
- frozenlist==1.5.0
- fsspec==2025.3.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- hypothesis==6.130.5
- idna==3.10
- imagesize==1.4.1
- iniconfig==2.1.0
- ipykernel==6.29.5
- ipython==9.0.2
- ipython-pygments-lexers==1.1.1
- ipytree==0.2.2
- ipywidgets==8.1.5
- isoduration==20.11.0
- itsdangerous==2.2.0
- jedi==0.19.2
- jinja2==3.1.6
- jmespath==1.0.1
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter-client==8.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.3.6
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==3.0.13
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mistune==3.1.3
- moto==5.1.2
- msgpack==1.1.0
- multidict==6.2.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nest-asyncio==1.6.0
- notebook==7.3.3
- notebook-shim==0.2.4
- numcodecs==0.15.1
- numpy==2.2.4
- numpydoc==1.8.0
- overrides==7.7.0
- packaging==24.2
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- propcache==0.3.1
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- py-partiql-parser==0.6.1
- pycparser==2.22
- pydata-sphinx-theme==0.16.1
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- responses==0.25.7
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- s3fs==2025.3.1
- s3transfer==0.11.3
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- soupsieve==2.6
- sphinx==8.1.3
- sphinx-autoapi==3.3.3
- sphinx-autobuild==2024.10.3
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-issues==5.0.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- starlette==0.46.1
- tabulate==0.9.0
- terminado==0.18.1
- tinycss2==1.4.0
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- universal-pathlib==0.2.6
- uri-template==1.3.0
- urllib3==2.3.0
- uvicorn==0.34.0
- watchfiles==1.0.4
- wcwidth==0.2.13
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- websockets==15.0.1
- werkzeug==3.1.3
- widgetsnbextension==4.0.13
- wrapt==1.17.2
- xmltodict==0.14.2
- yarl==1.18.3
- zarr==3.0.0b3.dev16+g501ae9ea
prefix: /opt/conda/envs/zarr-python
| [
"tests/test_indexing.py::test_vectorized_indexing_incompatible_shape[memory]"
] | [] | [
"tests/test_indexing.py::test_normalize_integer_selection",
"tests/test_indexing.py::test_replace_ellipsis",
"tests/test_indexing.py::test_get_basic_selection_0d[True-42-uint8]",
"tests/test_indexing.py::test_get_basic_selection_0d[False-42-uint8]",
"tests/test_indexing.py::test_get_basic_selection_1d",
"tests/test_indexing.py::test_get_basic_selection_2d",
"tests/test_indexing.py::test_fancy_indexing_fallback_on_get_setitem",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index3-expected_result3]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index4-expected_result4]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index5-expected_result5]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_2d[index6-expected_result6]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_getitem_3d[index3-expected_result3]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index0-expected_result0]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index1-expected_result1]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index2-expected_result2]",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_setitem_2d[index3-expected_result3]",
"tests/test_indexing.py::test_fancy_indexing_doesnt_mix_with_implicit_slicing",
"tests/test_indexing.py::test_set_basic_selection_0d[42-uint8]",
"tests/test_indexing.py::test_get_orthogonal_selection_1d_bool",
"tests/test_indexing.py::test_get_orthogonal_selection_1d_int",
"tests/test_indexing.py::test_get_orthogonal_selection_2d",
"tests/test_indexing.py::test_get_orthogonal_selection_3d",
"tests/test_indexing.py::test_orthogonal_indexing_edge_cases",
"tests/test_indexing.py::test_set_orthogonal_selection_1d",
"tests/test_indexing.py::test_set_orthogonal_selection_2d",
"tests/test_indexing.py::test_set_orthogonal_selection_3d",
"tests/test_indexing.py::test_orthogonal_indexing_fallback_on_get_setitem",
"tests/test_indexing.py::test_get_coordinate_selection_1d",
"tests/test_indexing.py::test_get_coordinate_selection_2d",
"tests/test_indexing.py::test_set_coordinate_selection_1d",
"tests/test_indexing.py::test_set_coordinate_selection_2d",
"tests/test_indexing.py::test_get_block_selection_1d",
"tests/test_indexing.py::test_get_block_selection_2d",
"tests/test_indexing.py::test_set_block_selection_1d",
"tests/test_indexing.py::test_set_block_selection_2d",
"tests/test_indexing.py::test_get_mask_selection_1d",
"tests/test_indexing.py::test_get_mask_selection_2d",
"tests/test_indexing.py::test_set_mask_selection_1d",
"tests/test_indexing.py::test_set_mask_selection_2d",
"tests/test_indexing.py::test_get_selection_out",
"tests/test_indexing.py::test_slice_selection_uints",
"tests/test_indexing.py::test_numpy_int_indexing",
"tests/test_indexing.py::test_accessed_chunks[shape0-chunks0-ops0]",
"tests/test_indexing.py::test_accessed_chunks[shape1-chunks1-ops1]",
"tests/test_indexing.py::test_accessed_chunks[shape2-chunks2-ops2]",
"tests/test_indexing.py::test_accessed_chunks[shape3-chunks3-ops3]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection0]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection1]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection2]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection3]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection4]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection5]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection6]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection7]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection8]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection9]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection10]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection11]",
"tests/test_indexing.py::test_indexing_equals_numpy[selection12]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection0]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection1]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection2]",
"tests/test_indexing.py::test_orthogonal_bool_indexing_like_numpy_ix[selection3]",
"tests/test_indexing.py::test_iter_grid[None-None-1]",
"tests/test_indexing.py::test_iter_grid[None-None-2]",
"tests/test_indexing.py::test_iter_grid[None-None-3]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[None-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-None-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d1-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-None-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d1-3]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-1]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-2]",
"tests/test_indexing.py::test_iter_grid[selection_shape_0d2-origin_0d2-3]",
"tests/test_indexing.py::test_iter_grid_invalid",
"tests/test_indexing.py::test_indexing_with_zarr_array",
"tests/test_indexing.py::test_zero_sized_chunks[shape0-local]",
"tests/test_indexing.py::test_zero_sized_chunks[shape0-memory]",
"tests/test_indexing.py::test_zero_sized_chunks[0-local]",
"tests/test_indexing.py::test_zero_sized_chunks[0-memory]",
"tests/test_indexing.py::test_zero_sized_chunks[shape2-local]",
"tests/test_indexing.py::test_zero_sized_chunks[shape2-memory]"
] | [] | MIT License | 20,292 | 221 | [
"src/zarr/core/array.py"
] |
|
proxmoxer__proxmoxer-189 | 80b6bd931df446f510dd0689a99e25c13e980bea | 2024-11-22 23:25:36 | 80b6bd931df446f510dd0689a99e25c13e980bea | jhollowe: Please add tests for this new functionality
asdorsey: Test added, it's basically a copy of `test_request_data` but with the data value in bytes format. | diff --git a/proxmoxer/backends/command_base.py b/proxmoxer/backends/command_base.py
index f8ca4a5..4d8e7b0 100644
--- a/proxmoxer/backends/command_base.py
+++ b/proxmoxer/backends/command_base.py
@@ -88,7 +88,12 @@ class CommandBaseSession:
command = [f"{self.service}sh", cmd, url]
# convert the options dict into a 2-tuple with the key formatted as a flag
- option_pairs = [(f"-{k}", str(v)) for k, v in chain(data.items(), params.items())]
+ option_pairs = []
+ for k, v in chain(data.items(), params.items()):
+ try:
+ option_pairs.append((f"-{k}", str(v, "utf-8")))
+ except TypeError:
+ option_pairs.append((f"-{k}", str(v)))
# add back in all the command arguments as their own pairs
if data_command is not None:
if isinstance(data_command, list):
| SSH backends choke on bytes as parameters
If bytes are passed as parameters to proxmoxer, they end up formatted like `b'this is a parameter'` instead of `this is a parameter`. This makes pvesh choke when the actual API call is made.
I have a fix, will create PR immediately after this ticket. | proxmoxer/proxmoxer | diff --git a/tests/test_command_base.py b/tests/test_command_base.py
index c7b6b4a..6306eed 100644
--- a/tests/test_command_base.py
+++ b/tests/test_command_base.py
@@ -122,6 +122,22 @@ class TestCommandBaseSession:
"json",
]
+ def test_request_bytes_data(self, mock_exec):
+ resp = self._session.request(
+ "GET", self.base_url + "/fake/echo", data={"key": b"bytes-value"}
+ )
+
+ assert resp.status_code == 200
+ assert resp.content == [
+ "pvesh",
+ "get",
+ self.base_url + "/fake/echo",
+ "-key",
+ "bytes-value",
+ "--output-format",
+ "json",
+ ]
+
def test_request_qemu_exec(self, mock_exec):
resp = self._session.request(
"POST",
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"test_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
coverage==7.8.0
coveralls==4.0.1
cryptography==44.0.2
docopt==0.6.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
openssh-wrapper==0.4
packaging==24.2
paramiko==3.5.1
pluggy==1.5.0
-e git+https://github.com/proxmoxer/proxmoxer.git@80b6bd931df446f510dd0689a99e25c13e980bea#egg=proxmoxer
pycparser==2.22
PyNaCl==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
requests==2.32.3
requests-toolbelt==1.0.0
responses==0.25.7
tomli==2.2.1
urllib3==2.3.0
| name: proxmoxer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- coverage==7.8.0
- coveralls==4.0.1
- cryptography==44.0.2
- docopt==0.6.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- openssh-wrapper==0.4
- packaging==24.2
- paramiko==3.5.1
- pluggy==1.5.0
- proxmoxer==2.1.0
- pycparser==2.22
- pynacl==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- requests==2.32.3
- requests-toolbelt==1.0.0
- responses==0.25.7
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/proxmoxer
| [
"tests/test_command_base.py::TestCommandBaseSession::test_request_bytes_data"
] | [] | [
"tests/test_command_base.py::TestResponse::test_init_all_args",
"tests/test_command_base.py::TestCommandBaseSession::test_init_all_args",
"tests/test_command_base.py::TestCommandBaseSession::test_exec",
"tests/test_command_base.py::TestCommandBaseSession::test_upload_file_obj",
"tests/test_command_base.py::TestCommandBaseSession::test_request_basic",
"tests/test_command_base.py::TestCommandBaseSession::test_request_task",
"tests/test_command_base.py::TestCommandBaseSession::test_request_error",
"tests/test_command_base.py::TestCommandBaseSession::test_request_error_generic",
"tests/test_command_base.py::TestCommandBaseSession::test_request_sudo",
"tests/test_command_base.py::TestCommandBaseSession::test_request_data",
"tests/test_command_base.py::TestCommandBaseSession::test_request_qemu_exec",
"tests/test_command_base.py::TestCommandBaseSession::test_request_qemu_exec_list",
"tests/test_command_base.py::TestCommandBaseSession::test_request_upload",
"tests/test_command_base.py::TestJsonSimpleSerializer::test_loads_pass",
"tests/test_command_base.py::TestJsonSimpleSerializer::test_loads_not_json",
"tests/test_command_base.py::TestJsonSimpleSerializer::test_loads_not_unicode",
"tests/test_command_base.py::TestCommandBaseBackend::test_init",
"tests/test_command_base.py::TestCommandBaseBackend::test_get_session",
"tests/test_command_base.py::TestCommandBaseBackend::test_get_base_url",
"tests/test_command_base.py::TestCommandBaseBackend::test_get_serializer"
] | [] | MIT License | 20,301 | 244 | [
"proxmoxer/backends/command_base.py"
] |
narwhals-dev__narwhals-1432 | 35c34f482cbdfaf6510b6ffce910226b16b65e85 | 2024-11-23 12:37:03 | ea1a64fcd5a1a01202b878648c51944d524bdd3a | diff --git a/narwhals/_arrow/series.py b/narwhals/_arrow/series.py
index ee06f1fa..f286b300 100644
--- a/narwhals/_arrow/series.py
+++ b/narwhals/_arrow/series.py
@@ -14,6 +14,7 @@ from narwhals._arrow.utils import narwhals_to_native_dtype
from narwhals._arrow.utils import native_to_narwhals_dtype
from narwhals._arrow.utils import parse_datetime_format
from narwhals._arrow.utils import validate_column_comparand
+from narwhals.translate import to_py_scalar
from narwhals.utils import Implementation
from narwhals.utils import generate_temporary_column_name
@@ -437,12 +438,12 @@ class ArrowSeries:
def any(self) -> bool:
import pyarrow.compute as pc # ignore-banned-import()
- return pc.any(self._native_series) # type: ignore[no-any-return]
+ return to_py_scalar(pc.any(self._native_series)) # type: ignore[no-any-return]
def all(self) -> bool:
import pyarrow.compute as pc # ignore-banned-import()
- return pc.all(self._native_series) # type: ignore[no-any-return]
+ return to_py_scalar(pc.all(self._native_series)) # type: ignore[no-any-return]
def is_between(
self, lower_bound: Any, upper_bound: Any, closed: str = "both"
@@ -719,9 +720,10 @@ class ArrowSeries:
ser = self._native_series
if descending:
- return pc.all(pc.greater_equal(ser[:-1], ser[1:])) # type: ignore[no-any-return]
+ result = pc.all(pc.greater_equal(ser[:-1], ser[1:]))
else:
- return pc.all(pc.less_equal(ser[:-1], ser[1:])) # type: ignore[no-any-return]
+ result = pc.all(pc.less_equal(ser[:-1], ser[1:]))
+ return to_py_scalar(result) # type: ignore[no-any-return]
def unique(self: Self, *, maintain_order: bool = False) -> ArrowSeries:
# The param `maintain_order` is only here for compatibility with the Polars API
diff --git a/narwhals/_arrow/utils.py b/narwhals/_arrow/utils.py
index 0a9fd2b8..0f773dfd 100644
--- a/narwhals/_arrow/utils.py
+++ b/narwhals/_arrow/utils.py
@@ -4,7 +4,6 @@ from typing import TYPE_CHECKING
from typing import Any
from typing import Sequence
-from narwhals.dependencies import get_polars
from narwhals.utils import isinstance_or_issubclass
if TYPE_CHECKING:
@@ -77,17 +76,6 @@ def native_to_narwhals_dtype(dtype: pa.DataType, dtypes: DTypes) -> DType:
def narwhals_to_native_dtype(dtype: DType | type[DType], dtypes: DTypes) -> Any:
- if (pl := get_polars()) is not None and isinstance(
- dtype, (pl.DataType, pl.DataType.__class__)
- ):
- msg = (
- f"Expected Narwhals object, got: {type(dtype)}.\n\n"
- "Perhaps you:\n"
- "- Forgot a `nw.from_native` somewhere?\n"
- "- Used `pl.Int64` instead of `nw.Int64`?"
- )
- raise TypeError(msg)
-
import pyarrow as pa # ignore-banned-import
if isinstance_or_issubclass(dtype, dtypes.Float64):
diff --git a/narwhals/_dask/utils.py b/narwhals/_dask/utils.py
index ef9e7439..88f2fdb7 100644
--- a/narwhals/_dask/utils.py
+++ b/narwhals/_dask/utils.py
@@ -4,7 +4,6 @@ from typing import TYPE_CHECKING
from typing import Any
from narwhals.dependencies import get_pandas
-from narwhals.dependencies import get_polars
from narwhals.dependencies import get_pyarrow
from narwhals.exceptions import InvalidIntoExprError
from narwhals.utils import isinstance_or_issubclass
@@ -86,17 +85,6 @@ def validate_comparand(lhs: dask_expr.Series, rhs: dask_expr.Series) -> None:
def narwhals_to_native_dtype(dtype: DType | type[DType], dtypes: DTypes) -> Any:
- if (pl := get_polars()) is not None and isinstance(
- dtype, (pl.DataType, pl.DataType.__class__)
- ):
- msg = (
- f"Expected Narwhals object, got: {type(dtype)}.\n\n"
- "Perhaps you:\n"
- "- Forgot a `nw.from_native` somewhere?\n"
- "- Used `pl.Int64` instead of `nw.Int64`?"
- )
- raise TypeError(msg)
-
if isinstance_or_issubclass(dtype, dtypes.Float64):
return "float64"
if isinstance_or_issubclass(dtype, dtypes.Float32):
diff --git a/narwhals/_pandas_like/utils.py b/narwhals/_pandas_like/utils.py
index ba5eece4..637df9e0 100644
--- a/narwhals/_pandas_like/utils.py
+++ b/narwhals/_pandas_like/utils.py
@@ -11,7 +11,6 @@ from typing import TypeVar
from narwhals._arrow.utils import (
native_to_narwhals_dtype as arrow_native_to_narwhals_dtype,
)
-from narwhals.dependencies import get_polars
from narwhals.exceptions import ColumnNotFoundError
from narwhals.utils import Implementation
from narwhals.utils import isinstance_or_issubclass
@@ -384,17 +383,6 @@ def narwhals_to_native_dtype( # noqa: PLR0915
backend_version: tuple[int, ...],
dtypes: DTypes,
) -> Any:
- if (pl := get_polars()) is not None and isinstance(
- dtype, (pl.DataType, pl.DataType.__class__)
- ):
- msg = (
- f"Expected Narwhals object, got: {type(dtype)}.\n\n"
- "Perhaps you:\n"
- "- Forgot a `nw.from_native` somewhere?\n"
- "- Used `pl.Int64` instead of `nw.Int64`?"
- )
- raise TypeError(msg)
-
dtype_backend = get_dtype_backend(starting_dtype, implementation)
if isinstance_or_issubclass(dtype, dtypes.Float64):
if dtype_backend == "pyarrow-nullable":
diff --git a/narwhals/_polars/utils.py b/narwhals/_polars/utils.py
index 79528af1..c25b59db 100644
--- a/narwhals/_polars/utils.py
+++ b/narwhals/_polars/utils.py
@@ -99,15 +99,6 @@ def native_to_narwhals_dtype(dtype: pl.DataType, dtypes: DTypes) -> DType:
def narwhals_to_native_dtype(dtype: DType | type[DType], dtypes: DTypes) -> pl.DataType:
import polars as pl # ignore-banned-import()
- if isinstance(dtype, (pl.DataType, pl.DataType.__class__)): # type: ignore[arg-type]
- msg = (
- f"Expected Narwhals object, got: {type(dtype)}.\n\n"
- "Perhaps you:\n"
- "- Forgot a `nw.from_native` somewhere?\n"
- "- Used `pl.Int64` instead of `nw.Int64`?"
- )
- raise TypeError(msg)
-
if dtype == dtypes.Float64:
return pl.Float64()
if dtype == dtypes.Float32:
diff --git a/narwhals/dtypes.py b/narwhals/dtypes.py
index 719bd989..e214213c 100644
--- a/narwhals/dtypes.py
+++ b/narwhals/dtypes.py
@@ -5,6 +5,8 @@ from datetime import timezone
from typing import TYPE_CHECKING
from typing import Mapping
+from narwhals.utils import isinstance_or_issubclass
+
if TYPE_CHECKING:
from typing import Iterator
from typing import Literal
@@ -13,6 +15,15 @@ if TYPE_CHECKING:
from typing_extensions import Self
+def _validate_dtype(dtype: DType | type[DType]) -> None:
+ if not isinstance_or_issubclass(dtype, DType):
+ msg = (
+ f"Expected Narwhals dtype, got: {type(dtype)}.\n\n"
+ "Hint: if you were trying to cast to a type, use e.g. nw.Int64 instead of 'int64'."
+ )
+ raise TypeError(msg)
+
+
class DType:
def __repr__(self) -> str: # pragma: no cover
return self.__class__.__qualname__
diff --git a/narwhals/expr.py b/narwhals/expr.py
index a43485e0..9c0a484a 100644
--- a/narwhals/expr.py
+++ b/narwhals/expr.py
@@ -11,6 +11,7 @@ from typing import Sequence
from typing import TypeVar
from narwhals.dependencies import is_numpy_array
+from narwhals.dtypes import _validate_dtype
from narwhals.utils import _validate_rolling_arguments
from narwhals.utils import flatten
@@ -202,6 +203,7 @@ class Expr:
foo: [[1,2,3]]
bar: [[6,7,8]]
"""
+ _validate_dtype(dtype)
return self.__class__(
lambda plx: self._call(plx).cast(dtype),
)
diff --git a/narwhals/series.py b/narwhals/series.py
index a5224cea..8ffe0b0f 100644
--- a/narwhals/series.py
+++ b/narwhals/series.py
@@ -11,6 +11,7 @@ from typing import Sequence
from typing import TypeVar
from typing import overload
+from narwhals.dtypes import _validate_dtype
from narwhals.utils import _validate_rolling_arguments
from narwhals.utils import parse_version
@@ -516,6 +517,7 @@ class Series:
1
]
"""
+ _validate_dtype(dtype)
return self._from_compliant_series(self._compliant_series.cast(dtype))
def to_frame(self) -> DataFrame[Any]:
diff --git a/narwhals/utils.py b/narwhals/utils.py
index d3cb0dda..37160038 100644
--- a/narwhals/utils.py
+++ b/narwhals/utils.py
@@ -162,7 +162,7 @@ def isinstance_or_issubclass(obj: Any, cls: Any) -> bool:
if isinstance(obj, DType):
return isinstance(obj, cls)
- return isinstance(obj, cls) or issubclass(obj, cls)
+ return isinstance(obj, cls) or (isinstance(obj, type) and issubclass(obj, cls))
def validate_laziness(items: Iterable[Any]) -> None:
| RFC: Series aggregations which return booleans should always return Python booleans
In general, we're trying to let libraries handle their own scalars, so that they can choose to preserve precision or keep data on a different device
But, for functions which aggregate to a single boolean, I think we should just return a Python bool. If someone calls a function which returns a boolean, then they're almost certainly using it with `if` and would probably not think to call `nw.to_py_scalar`. I think here the risks outweight the benefits, and so for booleans, we should always return Python bool
What's really unintuitive about pyarrow scalars is this:
```
In [4]: if pa.scalar(False): print('hey')
hey
``` | narwhals-dev/narwhals | diff --git a/tests/expr_and_series/cast_test.py b/tests/expr_and_series/cast_test.py
index 14e77d68..992ea5f5 100644
--- a/tests/expr_and_series/cast_test.py
+++ b/tests/expr_and_series/cast_test.py
@@ -175,16 +175,11 @@ def test_cast_string() -> None:
def test_cast_raises_for_unknown_dtype(
- constructor: Constructor,
- request: pytest.FixtureRequest,
+ constructor: Constructor, request: pytest.FixtureRequest
) -> None:
- if "pyarrow_table_constructor" in str(constructor) and PYARROW_VERSION <= (
- 15,
- ): # pragma: no cover
+ if "pyarrow_table" in str(constructor) and PYARROW_VERSION < (15,):
+ # Unsupported cast from string to dictionary using function cast_dictionary
request.applymarker(pytest.mark.xfail)
- if "polars" in str(constructor):
- request.applymarker(pytest.mark.xfail)
-
df = nw.from_native(constructor(data)).select(
nw.col(key).cast(value) for key, value in schema.items()
)
@@ -192,7 +187,7 @@ def test_cast_raises_for_unknown_dtype(
class Banana:
pass
- with pytest.raises(AssertionError, match=r"Unknown dtype"):
+ with pytest.raises(TypeError, match="Expected Narwhals dtype"):
df.select(nw.col("a").cast(Banana)) # type: ignore[arg-type]
@@ -229,5 +224,5 @@ def test_cast_datetime_tz_aware(
@pytest.mark.parametrize("dtype", [pl.String, pl.String()])
def test_raise_if_polars_dtype(constructor: Constructor, dtype: Any) -> None:
df = nw.from_native(constructor({"a": [1, 2, 3], "b": [4, 5, 6]}))
- with pytest.raises(TypeError, match="Expected Narwhals object, got:"):
+ with pytest.raises(TypeError, match="Expected Narwhals dtype, got:"):
df.select(nw.col("a").cast(dtype))
diff --git a/tests/frame/invalid_test.py b/tests/frame/invalid_test.py
index 811d0430..9abf4bd2 100644
--- a/tests/frame/invalid_test.py
+++ b/tests/frame/invalid_test.py
@@ -19,7 +19,7 @@ def test_invalid() -> None:
df.select(nw.all() + nw.all())
with pytest.raises(TypeError, match="Perhaps you:"):
df.select([pl.col("a")]) # type: ignore[list-item]
- with pytest.raises(TypeError, match="Perhaps you:"):
+ with pytest.raises(TypeError, match="Expected Narwhals dtype"):
df.select([nw.col("a").cast(pl.Int64)]) # type: ignore[arg-type]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 9
} | 1.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@35c34f482cbdfaf6510b6ffce910226b16b65e85#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
pyarrow==19.0.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.14.1
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- pyarrow==19.0.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[pandas_nullable_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[pandas_constructor]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_nullable_constructor-String]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[dask_lazy_p2_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_constructor-String]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pyarrow_table_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[pandas_pyarrow_constructor]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[polars_lazy_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_pyarrow_constructor-String]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pyarrow_table_constructor-String]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[polars_eager_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[polars_lazy_constructor]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_pyarrow_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[dask_lazy_p2_constructor-String]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[pyarrow_table_constructor]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[pandas_nullable_constructor-dtype1]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[polars_eager_constructor-String]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[dask_lazy_p2_constructor]",
"tests/expr_and_series/cast_test.py::test_raise_if_polars_dtype[polars_lazy_constructor-String]",
"tests/expr_and_series/cast_test.py::test_cast_raises_for_unknown_dtype[polars_eager_constructor]",
"tests/frame/invalid_test.py::test_invalid"
] | [] | [
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[polars_lazy_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[pandas_pyarrow_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[polars_eager_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[pandas_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[polars_eager_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[polars_lazy_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[pandas_pyarrow_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[polars_eager_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[pyarrow_table_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[dask_lazy_p2_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[pyarrow_table_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[pandas_pyarrow_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_series[pyarrow_table_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[pandas_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[dask_lazy_p2_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_string",
"tests/expr_and_series/cast_test.py::test_cast_series[pandas_nullable_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[polars_lazy_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[pandas_nullable_constructor]",
"tests/expr_and_series/cast_test.py::test_cast[pandas_constructor]",
"tests/expr_and_series/cast_test.py::test_cast_datetime_tz_aware[pandas_nullable_constructor]",
"tests/frame/invalid_test.py::test_validate_laziness",
"tests/frame/invalid_test.py::test_native_vs_non_native",
"tests/frame/invalid_test.py::test_memmap"
] | [] | MIT License | 20,305 | 2,675 | [
"narwhals/_arrow/series.py",
"narwhals/_arrow/utils.py",
"narwhals/_dask/utils.py",
"narwhals/_pandas_like/utils.py",
"narwhals/_polars/utils.py",
"narwhals/dtypes.py",
"narwhals/expr.py",
"narwhals/series.py",
"narwhals/utils.py"
] |
|
Delgan__loguru-1239 | 65fe4a8db9a8f297ae3648f51b5e3050b30945a9 | 2024-11-24 13:03:50 | 55776bbcfea82ee797ee2189d770cbcaac2ea8b6 | diff --git a/loguru/_better_exceptions.py b/loguru/_better_exceptions.py
index e9ee112..17d36d6 100644
--- a/loguru/_better_exceptions.py
+++ b/loguru/_better_exceptions.py
@@ -534,13 +534,39 @@ class ExceptionFormatter:
yield from self._indent("-" * 35, group_nesting + 1, prefix="+-")
def _format_list(self, frames):
- result = []
- for filename, lineno, name, line in frames:
- row = []
- row.append(' File "{}", line {}, in {}\n'.format(filename, lineno, name))
+
+ def source_message(filename, lineno, name, line):
+ message = ' File "%s", line %d, in %s\n' % (filename, lineno, name)
if line:
- row.append(" {}\n".format(line.strip()))
- result.append("".join(row))
+ message += " %s\n" % line.strip()
+ return message
+
+ def skip_message(count):
+ plural = "s" if count > 1 else ""
+ return " [Previous line repeated %d more time%s]\n" % (count, plural)
+
+ result = []
+ count = 0
+ last_source = None
+
+ for *source, line in frames:
+ if source != last_source and count > 3:
+ result.append(skip_message(count - 3))
+
+ if source == last_source:
+ count += 1
+ if count > 3:
+ continue
+ else:
+ count = 1
+
+ result.append(source_message(*source, line))
+ last_source = source
+
+ # Add a final skip message if the iteration of frames ended mid-repetition.
+ if count > 3:
+ result.append(skip_message(count - 3))
+
return result
def format_exception(self, type_, value, tb, *, from_decorator=False):
| Add unit tests for traceback formatting of RecursionError
For example, the following snippet:
```python
from loguru import logger
def rec():
rec()
try:
rec()
except Exception:
logger.exception("Oups")
```
Should produce the following logs:
```txt
2024-02-17 12:06:00.577 | ERROR | __main__:<module>:9 - Oups
Traceback (most recent call last):
> File "/home/delgan/Code/loguru/script.py", line 7, in <module>
rec()
└ <function rec at 0x7a13f1314720>
File "/home/delgan/Code/loguru/script.py", line 4, in rec
rec()
└ <function rec at 0x7a13f1314720>
File "/home/delgan/Code/loguru/script.py", line 4, in rec
rec()
└ <function rec at 0x7a13f1314720>
File "/home/delgan/Code/loguru/script.py", line 4, in rec
rec()
└ <function rec at 0x7a13f1314720>
[Previous line repeated 996 more times]
RecursionError: maximum recursion depth exceeded
```
Note the "[Previous line repeated 996 more times]".
| Delgan/loguru | diff --git a/tests/exceptions/output/others/one_liner_recursion.txt b/tests/exceptions/output/others/one_liner_recursion.txt
new file mode 100644
index 0000000..635d3a2
--- /dev/null
+++ b/tests/exceptions/output/others/one_liner_recursion.txt
@@ -0,0 +1,79 @@
+
+Traceback (most recent call last):
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <module>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ [Previous line repeated 8 more times]
+ZeroDivisionError: division by zero
+
+Traceback (most recent call last):
+> File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <module>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ [Previous line repeated 8 more times]
+ZeroDivisionError: division by zero
+
+[33m[1mTraceback (most recent call last):[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mone_liner_recursion.py[0m", line [33m14[0m, in [35m<module>[0m
+ [1mrec[0m [35m[1m=[0m [35m[1mlambda[0m [1mr[0m[1m,[0m [1mi[0m[1m:[0m [34m[1m1[0m [35m[1m/[0m [34m[1m0[0m [35m[1mif[0m [1mi[0m [35m[1m==[0m [34m[1m0[0m [35m[1melse[0m [1mr[0m[1m([0m[1mr[0m[1m,[0m [1mi[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m[1m;[0m [1mrec[0m[1m([0m[1mrec[0m[1m,[0m [34m[1m10[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mone_liner_recursion.py[0m", line [33m14[0m, in [35m<lambda>[0m
+ [1mrec[0m [35m[1m=[0m [35m[1mlambda[0m [1mr[0m[1m,[0m [1mi[0m[1m:[0m [34m[1m1[0m [35m[1m/[0m [34m[1m0[0m [35m[1mif[0m [1mi[0m [35m[1m==[0m [34m[1m0[0m [35m[1melse[0m [1mr[0m[1m([0m[1mr[0m[1m,[0m [1mi[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m[1m;[0m [1mrec[0m[1m([0m[1mrec[0m[1m,[0m [34m[1m10[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mone_liner_recursion.py[0m", line [33m14[0m, in [35m<lambda>[0m
+ [1mrec[0m [35m[1m=[0m [35m[1mlambda[0m [1mr[0m[1m,[0m [1mi[0m[1m:[0m [34m[1m1[0m [35m[1m/[0m [34m[1m0[0m [35m[1mif[0m [1mi[0m [35m[1m==[0m [34m[1m0[0m [35m[1melse[0m [1mr[0m[1m([0m[1mr[0m[1m,[0m [1mi[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m[1m;[0m [1mrec[0m[1m([0m[1mrec[0m[1m,[0m [34m[1m10[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mone_liner_recursion.py[0m", line [33m14[0m, in [35m<lambda>[0m
+ [1mrec[0m [35m[1m=[0m [35m[1mlambda[0m [1mr[0m[1m,[0m [1mi[0m[1m:[0m [34m[1m1[0m [35m[1m/[0m [34m[1m0[0m [35m[1mif[0m [1mi[0m [35m[1m==[0m [34m[1m0[0m [35m[1melse[0m [1mr[0m[1m([0m[1mr[0m[1m,[0m [1mi[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m[1m;[0m [1mrec[0m[1m([0m[1mrec[0m[1m,[0m [34m[1m10[0m[1m)[0m
+ [Previous line repeated 8 more times]
+[31m[1mZeroDivisionError[0m:[1m division by zero[0m
+
+Traceback (most recent call last):
+
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <module>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ │ └ <function <lambda> at 0xDEADBEEF>
+ └ <function <lambda> at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ │ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ └ 10
+ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ └ 10
+ │ └ 10
+ └ <function <lambda> at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ │ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ └ 9
+ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ └ 9
+ │ └ 9
+ └ <function <lambda> at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/one_liner_recursion.py", line 14, in <lambda>
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+ │ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ │ │ └ 8
+ │ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ │ └ <function <lambda> at 0xDEADBEEF>
+ │ │ └ 8
+ │ └ 8
+ └ <function <lambda> at 0xDEADBEEF>
+ [Previous line repeated 8 more times]
+
+ZeroDivisionError: division by zero
diff --git a/tests/exceptions/output/others/recursion_error.txt b/tests/exceptions/output/others/recursion_error.txt
new file mode 100644
index 0000000..56219c1
--- /dev/null
+++ b/tests/exceptions/output/others/recursion_error.txt
@@ -0,0 +1,57 @@
+
+Traceback (most recent call last):
+ File "tests/exceptions/source/others/recursion_error.py", line 19, in <module>
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ [Previous line repeated 996 more times]
+RecursionError: maximum recursion depth exceeded
+
+Traceback (most recent call last):
+> File "tests/exceptions/source/others/recursion_error.py", line 19, in <module>
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ [Previous line repeated 996 more times]
+RecursionError: maximum recursion depth exceeded
+
+[33m[1mTraceback (most recent call last):[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrecursion_error.py[0m", line [33m19[0m, in [35m<module>[0m
+ [1mrecursive[0m[1m([0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrecursion_error.py[0m", line [33m15[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrecursion_error.py[0m", line [33m15[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrecursion_error.py[0m", line [33m15[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1m)[0m
+ [Previous line repeated 996 more times]
+[31m[1mRecursionError[0m:[1m maximum recursion depth exceeded[0m
+
+Traceback (most recent call last):
+
+ File "tests/exceptions/source/others/recursion_error.py", line 19, in <module>
+ recursive()
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/recursion_error.py", line 15, in recursive
+ recursive()
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 996 more times]
+
+RecursionError: maximum recursion depth exceeded
diff --git a/tests/exceptions/output/others/repeated_lines.txt b/tests/exceptions/output/others/repeated_lines.txt
new file mode 100644
index 0000000..57ed102
--- /dev/null
+++ b/tests/exceptions/output/others/repeated_lines.txt
@@ -0,0 +1,504 @@
+
+Traceback (most recent call last):
+ File "tests/exceptions/source/others/repeated_lines.py", line 22, in <module>
+ recursive(10, 10)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 7 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 6 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 5 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 4 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 3 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 2 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 1 more time]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 15, in recursive
+ raise ValueError("End of recursion")
+ValueError: End of recursion
+
+Traceback (most recent call last):
+> File "tests/exceptions/source/others/repeated_lines.py", line 22, in <module>
+ recursive(10, 10)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 7 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 6 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 5 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 4 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 3 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 2 more times]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ [Previous line repeated 1 more time]
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ File "tests/exceptions/source/others/repeated_lines.py", line 15, in recursive
+ raise ValueError("End of recursion")
+ValueError: End of recursion
+
+[33m[1mTraceback (most recent call last):[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m22[0m, in [35m<module>[0m
+ [1mrecursive[0m[1m([0m[34m[1m10[0m[1m,[0m [34m[1m10[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 7 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 6 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 5 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 4 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 3 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 2 more times]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ [Previous line repeated 1 more time]
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m18[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m[1m,[0m [1minner[0m[35m[1m=[0m[1minner[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m17[0m, in [35mrecursive[0m
+ [1mrecursive[0m[1m([0m[1mouter[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m,[0m [1minner[0m[35m[1m=[0m[1mouter[0m [35m[1m-[0m [34m[1m1[0m[1m)[0m
+ File "[32mtests/exceptions/source/others/[0m[32m[1mrepeated_lines.py[0m", line [33m15[0m, in [35mrecursive[0m
+ [35m[1mraise[0m [1mValueError[0m[1m([0m[36m"End of recursion"[0m[1m)[0m
+[31m[1mValueError[0m:[1m End of recursion[0m
+
+Traceback (most recent call last):
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 22, in <module>
+ recursive(10, 10)
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 10
+ │ └ 10
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 9
+ │ └ 10
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 8
+ │ └ 10
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 7 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 10
+ │ └ 10
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 9
+ │ └ 9
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 8
+ │ └ 9
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 7
+ │ └ 9
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 6 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 9
+ │ └ 9
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 8
+ │ └ 8
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 7
+ │ └ 8
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 6
+ │ └ 8
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 5 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 8
+ │ └ 8
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 7
+ │ └ 7
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 6
+ │ └ 7
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 5
+ │ └ 7
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 4 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 7
+ │ └ 7
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 6
+ │ └ 6
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 5
+ │ └ 6
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 4
+ │ └ 6
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 3 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 6
+ │ └ 6
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 5
+ │ └ 5
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 4
+ │ └ 5
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 3
+ │ └ 5
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 2 more times]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 5
+ │ └ 5
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 4
+ │ └ 4
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 3
+ │ └ 4
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 2
+ │ └ 4
+ └ <function recursive at 0xDEADBEEF>
+ [Previous line repeated 1 more time]
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 4
+ │ └ 4
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 3
+ │ └ 3
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 2
+ │ └ 3
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 1
+ │ └ 3
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 3
+ │ └ 3
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 2
+ │ └ 2
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 1
+ │ └ 2
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 2
+ │ └ 2
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 18, in recursive
+ recursive(outer=outer, inner=inner - 1)
+ │ │ └ 1
+ │ └ 1
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 17, in recursive
+ recursive(outer=outer - 1, inner=outer - 1)
+ │ │ └ 1
+ │ └ 1
+ └ <function recursive at 0xDEADBEEF>
+
+ File "tests/exceptions/source/others/repeated_lines.py", line 15, in recursive
+ raise ValueError("End of recursion")
+
+ValueError: End of recursion
diff --git a/tests/exceptions/source/others/one_liner_recursion.py b/tests/exceptions/source/others/one_liner_recursion.py
new file mode 100644
index 0000000..91f29c0
--- /dev/null
+++ b/tests/exceptions/source/others/one_liner_recursion.py
@@ -0,0 +1,16 @@
+# fmt: off
+from loguru import logger
+
+import sys
+
+
+logger.remove()
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=True, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=True)
+logger.add(sys.stderr, format="", diagnose=True, backtrace=False, colorize=False)
+
+try:
+ rec = lambda r, i: 1 / 0 if i == 0 else r(r, i - 1); rec(rec, 10)
+except Exception:
+ logger.exception("Error")
diff --git a/tests/exceptions/source/others/recursion_error.py b/tests/exceptions/source/others/recursion_error.py
new file mode 100644
index 0000000..15e0fea
--- /dev/null
+++ b/tests/exceptions/source/others/recursion_error.py
@@ -0,0 +1,21 @@
+from loguru import logger
+
+import sys
+
+sys.setrecursionlimit(1000)
+
+logger.remove()
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=True, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=True)
+logger.add(sys.stderr, format="", diagnose=True, backtrace=False, colorize=False)
+
+
+def recursive():
+ recursive()
+
+
+try:
+ recursive()
+except Exception:
+ logger.exception("Oups")
diff --git a/tests/exceptions/source/others/repeated_lines.py b/tests/exceptions/source/others/repeated_lines.py
new file mode 100644
index 0000000..404ecf6
--- /dev/null
+++ b/tests/exceptions/source/others/repeated_lines.py
@@ -0,0 +1,24 @@
+from loguru import logger
+
+import sys
+
+
+logger.remove()
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=True, colorize=False)
+logger.add(sys.stderr, format="", diagnose=False, backtrace=False, colorize=True)
+logger.add(sys.stderr, format="", diagnose=True, backtrace=False, colorize=False)
+
+
+def recursive(outer, inner):
+ if outer == 0:
+ raise ValueError("End of recursion")
+ if inner == 0:
+ recursive(outer=outer - 1, inner=outer - 1)
+ recursive(outer=outer, inner=inner - 1)
+
+
+try:
+ recursive(10, 10)
+except Exception:
+ logger.exception("Oups")
diff --git a/tests/test_exceptions_formatting.py b/tests/test_exceptions_formatting.py
index 802cabb..47e05cf 100644
--- a/tests/test_exceptions_formatting.py
+++ b/tests/test_exceptions_formatting.py
@@ -219,6 +219,9 @@ def test_exception_ownership(filename):
"message_formatting_with_context_manager",
"message_formatting_with_decorator",
"nested_with_reraise",
+ "one_liner_recursion",
+ "recursion_error",
+ "repeated_lines",
"syntaxerror_without_traceback",
"sys_tracebacklimit",
"sys_tracebacklimit_negative",
@@ -228,6 +231,9 @@ def test_exception_ownership(filename):
],
)
def test_exception_others(filename):
+ if filename == "recursion_error" and platform.python_implementation() == "PyPy":
+ pytest.skip("RecursionError is not reliable on PyPy")
+
compare_exception("others", filename)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
attrs==25.3.0
babel==2.17.0
cachetools==5.5.2
certifi==2025.1.31
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
decorator==5.2.1
distlib==0.3.9
docutils==0.20.1
exceptiongroup==1.1.3
filelock==3.18.0
freezegun==1.5.0
identify==2.6.9
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
-e git+https://github.com/Delgan/loguru.git@65fe4a8db9a8f297ae3648f51b5e3050b30945a9#egg=loguru
MarkupSafe==3.0.2
mypy==1.13.0
mypy-extensions==1.0.0
nodeenv==1.9.1
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.0.1
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.2
pytest-cov==6.0.0
pytest-mypy-plugins==3.1.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
rpds-py==0.24.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.3.7
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tomlkit==0.13.2
tox==4.23.2
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.30.0
zipp==3.21.0
| name: loguru
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- attrs==25.3.0
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- decorator==5.2.1
- distlib==0.3.9
- docutils==0.20.1
- exceptiongroup==1.1.3
- filelock==3.18.0
- freezegun==1.5.0
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- loguru==0.7.2
- markupsafe==3.0.2
- mypy==1.13.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- platformdirs==4.3.7
- pre-commit==4.0.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.2
- pytest-cov==6.0.0
- pytest-mypy-plugins==3.1.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- referencing==0.36.2
- regex==2024.11.6
- requests==2.32.3
- rpds-py==0.24.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.3.7
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tomlkit==0.13.2
- tox==4.23.2
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.30.0
- zipp==3.21.0
prefix: /opt/conda/envs/loguru
| [
"tests/test_exceptions_formatting.py::test_exception_others[one_liner_recursion]",
"tests/test_exceptions_formatting.py::test_exception_others[recursion_error]",
"tests/test_exceptions_formatting.py::test_exception_others[repeated_lines]"
] | [] | [
"tests/test_exceptions_formatting.py::test_backtrace[chained_expression_direct]",
"tests/test_exceptions_formatting.py::test_backtrace[chained_expression_indirect]",
"tests/test_exceptions_formatting.py::test_backtrace[chaining_first]",
"tests/test_exceptions_formatting.py::test_backtrace[chaining_second]",
"tests/test_exceptions_formatting.py::test_backtrace[chaining_third]",
"tests/test_exceptions_formatting.py::test_backtrace[enqueue]",
"tests/test_exceptions_formatting.py::test_backtrace[enqueue_with_others_handlers]",
"tests/test_exceptions_formatting.py::test_backtrace[frame_values_backward]",
"tests/test_exceptions_formatting.py::test_backtrace[frame_values_forward]",
"tests/test_exceptions_formatting.py::test_backtrace[function]",
"tests/test_exceptions_formatting.py::test_backtrace[head_recursion]",
"tests/test_exceptions_formatting.py::test_backtrace[missing_attributes_traceback_objects]",
"tests/test_exceptions_formatting.py::test_backtrace[nested]",
"tests/test_exceptions_formatting.py::test_backtrace[nested_chained_catch_up]",
"tests/test_exceptions_formatting.py::test_backtrace[nested_decorator_catch_up]",
"tests/test_exceptions_formatting.py::test_backtrace[nested_explicit_catch_up]",
"tests/test_exceptions_formatting.py::test_backtrace[nested_wrapping]",
"tests/test_exceptions_formatting.py::test_backtrace[no_tb]",
"tests/test_exceptions_formatting.py::test_backtrace[not_enough_arguments]",
"tests/test_exceptions_formatting.py::test_backtrace[raising_recursion]",
"tests/test_exceptions_formatting.py::test_backtrace[suppressed_expression_direct]",
"tests/test_exceptions_formatting.py::test_backtrace[suppressed_expression_indirect]",
"tests/test_exceptions_formatting.py::test_backtrace[tail_recursion]",
"tests/test_exceptions_formatting.py::test_backtrace[too_many_arguments]",
"tests/test_exceptions_formatting.py::test_diagnose[assertion_error]",
"tests/test_exceptions_formatting.py::test_diagnose[assertion_error_custom]",
"tests/test_exceptions_formatting.py::test_diagnose[assertion_error_in_string]",
"tests/test_exceptions_formatting.py::test_diagnose[attributes]",
"tests/test_exceptions_formatting.py::test_diagnose[chained_both]",
"tests/test_exceptions_formatting.py::test_diagnose[encoding]",
"tests/test_exceptions_formatting.py::test_diagnose[global_variable]",
"tests/test_exceptions_formatting.py::test_diagnose[indentation_error]",
"tests/test_exceptions_formatting.py::test_diagnose[keyword_argument]",
"tests/test_exceptions_formatting.py::test_diagnose[multilines_repr]",
"tests/test_exceptions_formatting.py::test_diagnose[no_error_message]",
"tests/test_exceptions_formatting.py::test_diagnose[parenthesis]",
"tests/test_exceptions_formatting.py::test_diagnose[source_multilines]",
"tests/test_exceptions_formatting.py::test_diagnose[source_strings]",
"tests/test_exceptions_formatting.py::test_diagnose[syntax_error]",
"tests/test_exceptions_formatting.py::test_diagnose[syntax_highlighting]",
"tests/test_exceptions_formatting.py::test_diagnose[truncating]",
"tests/test_exceptions_formatting.py::test_diagnose[unprintable_object]",
"tests/test_exceptions_formatting.py::test_exception_ownership[assertion_from_lib]",
"tests/test_exceptions_formatting.py::test_exception_ownership[assertion_from_local]",
"tests/test_exceptions_formatting.py::test_exception_ownership[callback]",
"tests/test_exceptions_formatting.py::test_exception_ownership[catch_decorator]",
"tests/test_exceptions_formatting.py::test_exception_ownership[catch_decorator_from_lib]",
"tests/test_exceptions_formatting.py::test_exception_ownership[decorated_callback]",
"tests/test_exceptions_formatting.py::test_exception_ownership[direct]",
"tests/test_exceptions_formatting.py::test_exception_ownership[indirect]",
"tests/test_exceptions_formatting.py::test_exception_ownership[string_lib]",
"tests/test_exceptions_formatting.py::test_exception_ownership[string_source]",
"tests/test_exceptions_formatting.py::test_exception_ownership[syntaxerror]",
"tests/test_exceptions_formatting.py::test_exception_others[assertionerror_without_traceback]",
"tests/test_exceptions_formatting.py::test_exception_others[broken_but_decorated_repr]",
"tests/test_exceptions_formatting.py::test_exception_others[catch_as_context_manager]",
"tests/test_exceptions_formatting.py::test_exception_others[catch_as_decorator_with_parentheses]",
"tests/test_exceptions_formatting.py::test_exception_others[catch_as_decorator_without_parentheses]",
"tests/test_exceptions_formatting.py::test_exception_others[catch_as_function]",
"tests/test_exceptions_formatting.py::test_exception_others[catch_message]",
"tests/test_exceptions_formatting.py::test_exception_others[exception_formatting_coroutine]",
"tests/test_exceptions_formatting.py::test_exception_others[exception_formatting_function]",
"tests/test_exceptions_formatting.py::test_exception_others[exception_formatting_generator]",
"tests/test_exceptions_formatting.py::test_exception_others[exception_in_property]",
"tests/test_exceptions_formatting.py::test_exception_others[handler_formatting_with_context_manager]",
"tests/test_exceptions_formatting.py::test_exception_others[handler_formatting_with_decorator]",
"tests/test_exceptions_formatting.py::test_exception_others[level_name]",
"tests/test_exceptions_formatting.py::test_exception_others[level_number]",
"tests/test_exceptions_formatting.py::test_exception_others[message_formatting_with_context_manager]",
"tests/test_exceptions_formatting.py::test_exception_others[message_formatting_with_decorator]",
"tests/test_exceptions_formatting.py::test_exception_others[nested_with_reraise]",
"tests/test_exceptions_formatting.py::test_exception_others[syntaxerror_without_traceback]",
"tests/test_exceptions_formatting.py::test_exception_others[sys_tracebacklimit]",
"tests/test_exceptions_formatting.py::test_exception_others[sys_tracebacklimit_negative]",
"tests/test_exceptions_formatting.py::test_exception_others[sys_tracebacklimit_none]",
"tests/test_exceptions_formatting.py::test_exception_others[sys_tracebacklimit_unset]",
"tests/test_exceptions_formatting.py::test_exception_others[zerodivisionerror_without_traceback]",
"tests/test_exceptions_formatting.py::test_exception_modern[type_hints-minimum_python_version0]",
"tests/test_exceptions_formatting.py::test_exception_modern[positional_only_argument-minimum_python_version1]",
"tests/test_exceptions_formatting.py::test_exception_modern[walrus_operator-minimum_python_version2]",
"tests/test_exceptions_formatting.py::test_group_exception_using_backport",
"tests/test_exceptions_formatting.py::test_invalid_format_exception_only_no_output",
"tests/test_exceptions_formatting.py::test_invalid_format_exception_only_indented_error_message"
] | [] | MIT License | 20,310 | 462 | [
"loguru/_better_exceptions.py"
] |
|
tableau__server-client-python-1538 | 34605289489851184826afd96e8d27982b765ad3 | 2024-11-25 04:16:45 | 34605289489851184826afd96e8d27982b765ad3 | diff --git a/tableauserverclient/models/custom_view_item.py b/tableauserverclient/models/custom_view_item.py
index a0c0a98..5cafe46 100644
--- a/tableauserverclient/models/custom_view_item.py
+++ b/tableauserverclient/models/custom_view_item.py
@@ -5,14 +5,58 @@ from defusedxml.ElementTree import fromstring, tostring
from typing import Callable, Optional
from collections.abc import Iterator
-from .exceptions import UnpopulatedPropertyError
-from .user_item import UserItem
-from .view_item import ViewItem
-from .workbook_item import WorkbookItem
-from ..datetime_helpers import parse_datetime
+from tableauserverclient.models.exceptions import UnpopulatedPropertyError
+from tableauserverclient.models.user_item import UserItem
+from tableauserverclient.models.view_item import ViewItem
+from tableauserverclient.models.workbook_item import WorkbookItem
+from tableauserverclient.datetime_helpers import parse_datetime
class CustomViewItem:
+ """
+ Represents a Custom View item on Tableau Server.
+
+ Parameters
+ ----------
+ id : Optional[str]
+ The ID of the Custom View item.
+
+ name : Optional[str]
+ The name of the Custom View item.
+
+ Attributes
+ ----------
+ content_url : Optional[str]
+ The content URL of the Custom View item.
+
+ created_at : Optional[datetime]
+ The date and time the Custom View item was created.
+
+ image: bytes
+ The image of the Custom View item. Must be populated first.
+
+ pdf: bytes
+ The PDF of the Custom View item. Must be populated first.
+
+ csv: Iterator[bytes]
+ The CSV of the Custom View item. Must be populated first.
+
+ shared : Optional[bool]
+ Whether the Custom View item is shared.
+
+ updated_at : Optional[datetime]
+ The date and time the Custom View item was last updated.
+
+ owner : Optional[UserItem]
+ The id of the owner of the Custom View item.
+
+ workbook : Optional[WorkbookItem]
+ The id of the workbook the Custom View item belongs to.
+
+ view : Optional[ViewItem]
+ The id of the view the Custom View item belongs to.
+ """
+
def __init__(self, id: Optional[str] = None, name: Optional[str] = None) -> None:
self._content_url: Optional[str] = None # ?
self._created_at: Optional["datetime"] = None
diff --git a/tableauserverclient/server/endpoint/auth_endpoint.py b/tableauserverclient/server/endpoint/auth_endpoint.py
index 4211bb7..35dfa5d 100644
--- a/tableauserverclient/server/endpoint/auth_endpoint.py
+++ b/tableauserverclient/server/endpoint/auth_endpoint.py
@@ -84,9 +84,10 @@ class Auth(Endpoint):
self._check_status(server_response, url)
parsed_response = fromstring(server_response.content)
site_id = parsed_response.find(".//t:site", namespaces=self.parent_srv.namespace).get("id", None)
+ site_url = parsed_response.find(".//t:site", namespaces=self.parent_srv.namespace).get("contentUrl", None)
user_id = parsed_response.find(".//t:user", namespaces=self.parent_srv.namespace).get("id", None)
auth_token = parsed_response.find("t:credentials", namespaces=self.parent_srv.namespace).get("token", None)
- self.parent_srv._set_auth(site_id, user_id, auth_token)
+ self.parent_srv._set_auth(site_id, user_id, auth_token, site_url)
logger.info(f"Signed into {self.parent_srv.server_address} as user with id {user_id}")
return Auth.contextmgr(self.sign_out)
@@ -155,9 +156,10 @@ class Auth(Endpoint):
self._check_status(server_response, url)
parsed_response = fromstring(server_response.content)
site_id = parsed_response.find(".//t:site", namespaces=self.parent_srv.namespace).get("id", None)
+ site_url = parsed_response.find(".//t:site", namespaces=self.parent_srv.namespace).get("contentUrl", None)
user_id = parsed_response.find(".//t:user", namespaces=self.parent_srv.namespace).get("id", None)
auth_token = parsed_response.find("t:credentials", namespaces=self.parent_srv.namespace).get("token", None)
- self.parent_srv._set_auth(site_id, user_id, auth_token)
+ self.parent_srv._set_auth(site_id, user_id, auth_token, site_url)
logger.info(f"Signed into {self.parent_srv.server_address} as user with id {user_id}")
return Auth.contextmgr(self.sign_out)
diff --git a/tableauserverclient/server/endpoint/custom_views_endpoint.py b/tableauserverclient/server/endpoint/custom_views_endpoint.py
index b02b05d..8d78dca 100644
--- a/tableauserverclient/server/endpoint/custom_views_endpoint.py
+++ b/tableauserverclient/server/endpoint/custom_views_endpoint.py
@@ -3,7 +3,7 @@ import logging
import os
from contextlib import closing
from pathlib import Path
-from typing import Optional, Union
+from typing import Optional, Union, TYPE_CHECKING
from collections.abc import Iterator
from tableauserverclient.config import BYTES_PER_MB, config
@@ -21,6 +21,9 @@ from tableauserverclient.server import (
from tableauserverclient.helpers.logging import logger
+if TYPE_CHECKING:
+ from tableauserverclient.server.query import QuerySet
+
"""
Get a list of custom views on a site
get the details of a custom view
@@ -51,19 +54,31 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
def expurl(self) -> str:
return f"{self.parent_srv._server_address}/api/exp/sites/{self.parent_srv.site_id}/customviews"
- """
- If the request has no filter parameters: Administrators will see all custom views.
- Other users will see only custom views that they own.
- If the filter parameters include ownerId: Users will see only custom views that they own.
- If the filter parameters include viewId and/or workbookId, and don't include ownerId:
- Users will see those custom views that they have Write and WebAuthoring permissions for.
- If site user visibility is not set to Limited, the Users will see those custom views that are "public",
- meaning the value of their shared attribute is true.
- If site user visibility is set to Limited, ????
- """
-
@api(version="3.18")
def get(self, req_options: Optional["RequestOptions"] = None) -> tuple[list[CustomViewItem], PaginationItem]:
+ """
+ Get a list of custom views on a site.
+
+ If the request has no filter parameters: Administrators will see all custom views.
+ Other users will see only custom views that they own.
+ If the filter parameters include ownerId: Users will see only custom views that they own.
+ If the filter parameters include viewId and/or workbookId, and don't include ownerId:
+ Users will see those custom views that they have Write and WebAuthoring permissions for.
+ If site user visibility is not set to Limited, the Users will see those custom views that are "public",
+ meaning the value of their shared attribute is true.
+ If site user visibility is set to Limited, ????
+
+ Rest API: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_workbooks_and_views.htm#list_custom_views
+
+ Parameters
+ ----------
+ req_options : RequestOptions, optional
+ Filtering options for the request, by default None
+
+ Returns
+ -------
+ tuple[list[CustomViewItem], PaginationItem]
+ """
logger.info("Querying all custom views on site")
url = self.baseurl
server_response = self.get_request(url, req_options)
@@ -73,6 +88,19 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.18")
def get_by_id(self, view_id: str) -> Optional[CustomViewItem]:
+ """
+ Get the details of a specific custom view.
+
+ Rest API: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_workbooks_and_views.htm#get_custom_view
+
+ Parameters
+ ----------
+ view_id : str
+
+ Returns
+ -------
+ Optional[CustomViewItem]
+ """
if not view_id:
error = "Custom view item missing ID."
raise MissingRequiredFieldError(error)
@@ -83,6 +111,27 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.18")
def populate_image(self, view_item: CustomViewItem, req_options: Optional["ImageRequestOptions"] = None) -> None:
+ """
+ Populate the image of a custom view.
+
+ Rest API: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_workbooks_and_views.htm#get_custom_view_image
+
+ Parameters
+ ----------
+ view_item : CustomViewItem
+
+ req_options : ImageRequestOptions, optional
+ Options to customize the image returned, by default None
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ MissingRequiredFieldError
+ If the view_item is missing an ID
+ """
if not view_item.id:
error = "Custom View item missing ID."
raise MissingRequiredFieldError(error)
@@ -101,6 +150,26 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.23")
def populate_pdf(self, custom_view_item: CustomViewItem, req_options: Optional["PDFRequestOptions"] = None) -> None:
+ """
+ Populate the PDF of a custom view.
+
+ Parameters
+ ----------
+ custom_view_item : CustomViewItem
+ The custom view item to populate the PDF for.
+
+ req_options : PDFRequestOptions, optional
+ Options to customize the PDF returned, by default None
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ MissingRequiredFieldError
+ If the custom view item is missing an ID
+ """
if not custom_view_item.id:
error = "Custom View item missing ID."
raise MissingRequiredFieldError(error)
@@ -121,6 +190,26 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.23")
def populate_csv(self, custom_view_item: CustomViewItem, req_options: Optional["CSVRequestOptions"] = None) -> None:
+ """
+ Populate the CSV of a custom view.
+
+ Parameters
+ ----------
+ custom_view_item : CustomViewItem
+ The custom view item to populate the CSV for.
+
+ req_options : CSVRequestOptions, optional
+ Options to customize the CSV returned, by default None
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ MissingRequiredFieldError
+ If the custom view item is missing an ID
+ """
if not custom_view_item.id:
error = "Custom View item missing ID."
raise MissingRequiredFieldError(error)
@@ -141,6 +230,21 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.18")
def update(self, view_item: CustomViewItem) -> Optional[CustomViewItem]:
+ """
+ Updates the name, owner, or shared status of a custom view.
+
+ Rest API: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_workbooks_and_views.htm#update_custom_view
+
+ Parameters
+ ----------
+ view_item : CustomViewItem
+ The custom view item to update.
+
+ Returns
+ -------
+ Optional[CustomViewItem]
+ The updated custom view item.
+ """
if not view_item.id:
error = "Custom view item missing ID."
raise MissingRequiredFieldError(error)
@@ -158,6 +262,25 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
# Delete 1 view by id
@api(version="3.19")
def delete(self, view_id: str) -> None:
+ """
+ Deletes a single custom view by ID.
+
+ Rest API: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_workbooks_and_views.htm#delete_custom_view
+
+ Parameters
+ ----------
+ view_id : str
+ The ID of the custom view to delete.
+
+ Returns
+ -------
+ None
+
+ Raises
+ ------
+ ValueError
+ If the view_id is not provided.
+ """
if not view_id:
error = "Custom View ID undefined."
raise ValueError(error)
@@ -167,6 +290,27 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.21")
def download(self, view_item: CustomViewItem, file: PathOrFileW) -> PathOrFileW:
+ """
+ Download the definition of a custom view as json. The file parameter can
+ be a file path or a file object. If a file path is provided, the file
+ will be written to that location. If a file object is provided, the file
+ will be written to that object.
+
+ May contain sensitive information.
+
+ Parameters
+ ----------
+ view_item : CustomViewItem
+ The custom view item to download.
+
+ file : PathOrFileW
+ The file path or file object to write the custom view to.
+
+ Returns
+ -------
+ PathOrFileW
+ The file path or file object that the custom view was written to.
+ """
url = f"{self.expurl}/{view_item.id}/content"
server_response = self.get_request(url)
if isinstance(file, io_types_w):
@@ -180,6 +324,25 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
@api(version="3.21")
def publish(self, view_item: CustomViewItem, file: PathOrFileR) -> Optional[CustomViewItem]:
+ """
+ Publish a custom view to Tableau Server. The file parameter can be a
+ file path or a file object. If a file path is provided, the file will be
+ read from that location. If a file object is provided, the file will be
+ read from that object.
+
+ Parameters
+ ----------
+ view_item : CustomViewItem
+ The custom view item to publish.
+
+ file : PathOrFileR
+ The file path or file object to read the custom view from.
+
+ Returns
+ -------
+ Optional[CustomViewItem]
+ The published custom view item.
+ """
url = self.expurl
if isinstance(file, io_types_r):
size = get_file_object_size(file)
@@ -207,3 +370,25 @@ class CustomViews(QuerysetEndpoint[CustomViewItem]):
server_response = self.post_request(url, xml_request, content_type)
return CustomViewItem.from_response(server_response.content, self.parent_srv.namespace)
+
+ def filter(self, *invalid, page_size: Optional[int] = None, **kwargs) -> "QuerySet[CustomViewItem]":
+ """
+ Queries the Tableau Server for items using the specified filters. Page
+ size can be specified to limit the number of items returned in a single
+ request. If not specified, the default page size is 100. Page size can
+ be an integer between 1 and 1000.
+
+ No positional arguments are allowed. All filters must be specified as
+ keyword arguments. If you use the equality operator, you can specify it
+ through <field_name>=<value>. If you want to use a different operator,
+ you can specify it through <field_name>__<operator>=<value>. Field
+ names can either be in snake_case or camelCase.
+
+ This endpoint supports the following fields and operators:
+
+ view_id=...
+ workbook_id=...
+ owner_id=...
+ """
+
+ return super().filter(*invalid, page_size=page_size, **kwargs)
diff --git a/tableauserverclient/server/endpoint/datasources_endpoint.py b/tableauserverclient/server/endpoint/datasources_endpoint.py
index 6bd809c..88c739d 100644
--- a/tableauserverclient/server/endpoint/datasources_endpoint.py
+++ b/tableauserverclient/server/endpoint/datasources_endpoint.py
@@ -102,10 +102,15 @@ class Datasources(QuerysetEndpoint[DatasourceItem], TaggingMixin[DatasourceItem]
datasource_item._set_connections(connections_fetcher)
logger.info(f"Populated connections for datasource (ID: {datasource_item.id})")
- def _get_datasource_connections(self, datasource_item, req_options=None):
+ def _get_datasource_connections(
+ self, datasource_item: DatasourceItem, req_options: Optional[RequestOptions] = None
+ ) -> list[ConnectionItem]:
url = f"{self.baseurl}/{datasource_item.id}/connections"
server_response = self.get_request(url, req_options)
connections = ConnectionItem.from_response(server_response.content, self.parent_srv.namespace)
+ for connection in connections:
+ connection._datasource_id = datasource_item.id
+ connection._datasource_name = datasource_item.name
return connections
# Delete 1 datasource by id
diff --git a/tableauserverclient/server/request_options.py b/tableauserverclient/server/request_options.py
index d79ac7f..2a5bb80 100644
--- a/tableauserverclient/server/request_options.py
+++ b/tableauserverclient/server/request_options.py
@@ -122,6 +122,7 @@ class RequestOptions(RequestOptionsBase):
NotificationType = "notificationType"
OwnerDomain = "ownerDomain"
OwnerEmail = "ownerEmail"
+ OwnerId = "ownerId"
OwnerName = "ownerName"
ParentProjectId = "parentProjectId"
Priority = "priority"
@@ -148,8 +149,10 @@ class RequestOptions(RequestOptionsBase):
UpdatedAt = "updatedAt"
UserCount = "userCount"
UserId = "userId"
+ ViewId = "viewId"
ViewUrlName = "viewUrlName"
WorkbookDescription = "workbookDescription"
+ WorkbookId = "workbookId"
WorkbookName = "workbookName"
class Direction:
diff --git a/tableauserverclient/server/server.py b/tableauserverclient/server/server.py
index 4eeefca..02abb3f 100644
--- a/tableauserverclient/server/server.py
+++ b/tableauserverclient/server/server.py
@@ -207,12 +207,14 @@ class Server:
self._site_id = None
self._user_id = None
self._auth_token = None
+ self._site_url = None
self._session = self._session_factory()
- def _set_auth(self, site_id, user_id, auth_token):
+ def _set_auth(self, site_id, user_id, auth_token, site_url=None):
self._site_id = site_id
self._user_id = user_id
self._auth_token = auth_token
+ self._site_url = site_url
def _get_legacy_version(self):
# the serverInfo call was introduced in 2.4, earlier than that we have this different call
@@ -282,6 +284,13 @@ class Server:
raise NotSignedInError(error)
return self._site_id
+ @property
+ def site_url(self):
+ if self._site_url is None:
+ error = "Missing site URL. You must sign in first."
+ raise NotSignedInError(error)
+ return self._site_url
+
@property
def user_id(self):
if self._user_id is None:
| `populate_connections` doesn't fetch `datasource_id`
**Describe the bug**
Hi,
I'm encountering an issue with the Tableau Python client. Specifically, the `datasource_id` is missing from the `ConnectionItem` object.
Here’s the scenario: after retrieving a datasource by id, I populate its connections, but the `ConnectionItem` objects lack the datasource information.
From reviewing the source code:
https://github.com/tableau/server-client-python/blob/1d98fdad189ebed130fb904e8fa5dca2207f9011/tableauserverclient/server/endpoint/datasources_endpoint.py#L106
it seems the REST method being used by the module is: `GET /api/api-version/sites/site-id/datasources/datasource-id/connections`.
According to the [official documentation](https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_data_sources.htm), this method does not appear to return datasource information.
**Versions**
- Tableau Cloud
- Python version: 3.11.0
- TSC library version: 0.34
**To Reproduce**
```python
import os
import tableauserverclient as TSC
token_name = 'my_token'
token_value = os.environ['TABLEAU_TOKEN']
tableau_site = 'my-site-it'
tableau_server = 'https://eu-west-1a.online.tableau.com'
datasource_id = 'REPLACE_ME'
def main():
tableau_auth = TSC.PersonalAccessTokenAuth(token_name, token_value, site_id=tableau_site)
server = TSC.Server(tableau_server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
ds = server.datasources.get_by_id(datasource_id)
server.datasources.populate_connections(ds)
# This will fail
assert ds.connections[0].datasource_id
if __name__ == '__main__':
main()
```
**Results**
`connections[0].datasource_id` is `None`, data is not fetched from the REST API.
Thanks :) | tableau/server-client-python | diff --git a/test/test_auth.py b/test/test_auth.py
index 48100ad..09e3e25 100644
--- a/test/test_auth.py
+++ b/test/test_auth.py
@@ -27,6 +27,7 @@ class AuthTests(unittest.TestCase):
self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token)
self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id)
+ self.assertEqual("Samples", self.server.site_url)
self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id)
def test_sign_in_with_personal_access_tokens(self):
@@ -41,6 +42,7 @@ class AuthTests(unittest.TestCase):
self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token)
self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id)
+ self.assertEqual("Samples", self.server.site_url)
self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id)
def test_sign_in_impersonate(self):
@@ -93,6 +95,7 @@ class AuthTests(unittest.TestCase):
self.assertIsNone(self.server._auth_token)
self.assertIsNone(self.server._site_id)
+ self.assertIsNone(self.server._site_url)
self.assertIsNone(self.server._user_id)
def test_switch_site(self):
@@ -109,6 +112,7 @@ class AuthTests(unittest.TestCase):
self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token)
self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id)
+ self.assertEqual("Samples", self.server.site_url)
self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id)
def test_revoke_all_server_admin_tokens(self):
@@ -125,4 +129,5 @@ class AuthTests(unittest.TestCase):
self.assertEqual("eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l", self.server.auth_token)
self.assertEqual("6b7179ba-b82b-4f0f-91ed-812074ac5da6", self.server.site_id)
+ self.assertEqual("Samples", self.server.site_url)
self.assertEqual("1a96d216-e9b8-497b-a82a-0b899a965e01", self.server.user_id)
diff --git a/test/test_datasource.py b/test/test_datasource.py
index 45d9ba9..e8a9572 100644
--- a/test/test_datasource.py
+++ b/test/test_datasource.py
@@ -174,17 +174,22 @@ class DatasourceTests(unittest.TestCase):
connections: Optional[list[ConnectionItem]] = single_datasource.connections
self.assertIsNotNone(connections)
+ assert connections is not None
ds1, ds2 = connections
self.assertEqual("be786ae0-d2bf-4a4b-9b34-e2de8d2d4488", ds1.id)
self.assertEqual("textscan", ds1.connection_type)
self.assertEqual("forty-two.net", ds1.server_address)
self.assertEqual("duo", ds1.username)
self.assertEqual(True, ds1.embed_password)
+ self.assertEqual(ds1.datasource_id, single_datasource.id)
+ self.assertEqual(single_datasource.name, ds1.datasource_name)
self.assertEqual("970e24bc-e200-4841-a3e9-66e7d122d77e", ds2.id)
self.assertEqual("sqlserver", ds2.connection_type)
self.assertEqual("database.com", ds2.server_address)
self.assertEqual("heero", ds2.username)
self.assertEqual(False, ds2.embed_password)
+ self.assertEqual(ds2.datasource_id, single_datasource.id)
+ self.assertEqual(single_datasource.name, ds2.datasource_name)
def test_update_connection(self) -> None:
populate_xml, response_xml = read_xml_assets(POPULATE_CONNECTIONS_XML, UPDATE_CONNECTION_XML)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 6
} | 0.34 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
black==24.8.0
build==1.2.2.post1
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
defusedxml==0.7.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mypy==1.4.0
mypy-extensions==1.0.0
packaging @ file:///croot/packaging_1734472117206/work
pathspec==0.12.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject_hooks==1.2.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-subtests==0.14.1
requests==2.32.3
requests-mock==1.12.1
-e git+https://github.com/tableau/server-client-python.git@34605289489851184826afd96e8d27982b765ad3#egg=tableauserverclient
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
urllib3==2.3.0
zipp==3.21.0
| name: server-client-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- black==24.8.0
- build==1.2.2.post1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- defusedxml==0.7.1
- idna==3.10
- importlib-metadata==8.6.1
- mypy==1.4.0
- mypy-extensions==1.0.0
- pathspec==0.12.1
- platformdirs==4.3.7
- pyproject-hooks==1.2.0
- pytest-cov==6.0.0
- pytest-subtests==0.14.1
- requests==2.32.3
- requests-mock==1.12.1
- tableauserverclient==0.32.post0.dev112
- typing-extensions==4.13.0
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/server-client-python
| [
"test/test_auth.py::AuthTests::test_revoke_all_server_admin_tokens",
"test/test_auth.py::AuthTests::test_sign_in",
"test/test_auth.py::AuthTests::test_sign_in_with_personal_access_tokens",
"test/test_auth.py::AuthTests::test_sign_out",
"test/test_auth.py::AuthTests::test_switch_site",
"test/test_datasource.py::DatasourceTests::test_populate_connections"
] | [
"test/test_datasource.py::DatasourceTests::test_update_hyper_data_datasource_payload_file"
] | [
"test/test_auth.py::AuthTests::test_sign_in_error",
"test/test_auth.py::AuthTests::test_sign_in_impersonate",
"test/test_auth.py::AuthTests::test_sign_in_invalid_token",
"test/test_auth.py::AuthTests::test_sign_in_without_auth",
"test/test_datasource.py::DatasourceTests::test_bad_download_response",
"test/test_datasource.py::DatasourceTests::test_create_extracts",
"test/test_datasource.py::DatasourceTests::test_create_extracts_encrypted",
"test/test_datasource.py::DatasourceTests::test_credentials_and_multi_connect_raises_exception",
"test/test_datasource.py::DatasourceTests::test_delete",
"test/test_datasource.py::DatasourceTests::test_delete_extracts",
"test/test_datasource.py::DatasourceTests::test_delete_revision",
"test/test_datasource.py::DatasourceTests::test_download",
"test/test_datasource.py::DatasourceTests::test_download_extract_only",
"test/test_datasource.py::DatasourceTests::test_download_object",
"test/test_datasource.py::DatasourceTests::test_download_revision",
"test/test_datasource.py::DatasourceTests::test_download_sanitizes_name",
"test/test_datasource.py::DatasourceTests::test_get",
"test/test_datasource.py::DatasourceTests::test_get_before_signin",
"test/test_datasource.py::DatasourceTests::test_get_by_id",
"test/test_datasource.py::DatasourceTests::test_get_empty",
"test/test_datasource.py::DatasourceTests::test_populate_permissions",
"test/test_datasource.py::DatasourceTests::test_publish",
"test/test_datasource.py::DatasourceTests::test_publish_a_non_packaged_file_object",
"test/test_datasource.py::DatasourceTests::test_publish_a_packaged_file_object",
"test/test_datasource.py::DatasourceTests::test_publish_async",
"test/test_datasource.py::DatasourceTests::test_publish_file_object_of_unknown_type_raises_exception",
"test/test_datasource.py::DatasourceTests::test_publish_hyper_file_object_raises_exception",
"test/test_datasource.py::DatasourceTests::test_publish_invalid_file_type",
"test/test_datasource.py::DatasourceTests::test_publish_missing_mode",
"test/test_datasource.py::DatasourceTests::test_publish_missing_path",
"test/test_datasource.py::DatasourceTests::test_publish_multi_connection",
"test/test_datasource.py::DatasourceTests::test_publish_single_connection",
"test/test_datasource.py::DatasourceTests::test_publish_tde_file_object_raises_exception",
"test/test_datasource.py::DatasourceTests::test_publish_unnamed_file_object",
"test/test_datasource.py::DatasourceTests::test_refresh_id",
"test/test_datasource.py::DatasourceTests::test_refresh_object",
"test/test_datasource.py::DatasourceTests::test_revisions",
"test/test_datasource.py::DatasourceTests::test_synchronous_publish_timeout_error",
"test/test_datasource.py::DatasourceTests::test_update",
"test/test_datasource.py::DatasourceTests::test_update_connection",
"test/test_datasource.py::DatasourceTests::test_update_copy_fields",
"test/test_datasource.py::DatasourceTests::test_update_hyper_data_connection_object",
"test/test_datasource.py::DatasourceTests::test_update_hyper_data_datasource_invalid_payload_file",
"test/test_datasource.py::DatasourceTests::test_update_hyper_data_datasource_object",
"test/test_datasource.py::DatasourceTests::test_update_hyper_data_datasource_string",
"test/test_datasource.py::DatasourceTests::test_update_missing_id",
"test/test_datasource.py::DatasourceTests::test_update_tags"
] | [] | MIT License | 20,316 | 4,608 | [
"tableauserverclient/models/custom_view_item.py",
"tableauserverclient/server/endpoint/auth_endpoint.py",
"tableauserverclient/server/endpoint/custom_views_endpoint.py",
"tableauserverclient/server/endpoint/datasources_endpoint.py",
"tableauserverclient/server/request_options.py",
"tableauserverclient/server/server.py"
] |
|
tobymao__sqlglot-4447 | 954d8fd12740071e0951d1df3a405a4b9634868d | 2024-11-26 09:43:29 | 3945acc4a0dfd58147de929c9a2c71734d8f1ade | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 5eb02a14..2f3ac53e 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -40,6 +40,11 @@ if t.TYPE_CHECKING:
logger = logging.getLogger("sqlglot")
+JSON_EXTRACT_TYPE = t.Union[exp.JSONExtract, exp.JSONExtractScalar, exp.JSONExtractArray]
+
+DQUOTES_ESCAPING_JSON_FUNCTIONS = ("JSON_QUERY", "JSON_VALUE", "JSON_QUERY_ARRAY")
+
+
def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
if not expression.find_ancestor(exp.From, exp.Join):
return self.values_sql(expression)
@@ -324,6 +329,23 @@ def _build_contains_substring(args: t.List) -> exp.Contains | exp.Anonymous:
return exp.Contains(this=this, expression=expr)
+def _json_extract_sql(self: BigQuery.Generator, expression: JSON_EXTRACT_TYPE) -> str:
+ name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
+ upper = name.upper()
+
+ dquote_escaping = upper in DQUOTES_ESCAPING_JSON_FUNCTIONS
+
+ if dquote_escaping:
+ self._quote_json_path_key_using_brackets = False
+
+ sql = rename_func(upper)(self, expression)
+
+ if dquote_escaping:
+ self._quote_json_path_key_using_brackets = True
+
+ return sql
+
+
class BigQuery(Dialect):
WEEK_OFFSET = -1
UNNEST_COLUMN_ONLY = True
@@ -869,6 +891,9 @@ class BigQuery(Dialect):
exp.ILike: no_ilike_sql,
exp.IntDiv: rename_func("DIV"),
exp.Int64: rename_func("INT64"),
+ exp.JSONExtract: _json_extract_sql,
+ exp.JSONExtractArray: _json_extract_sql,
+ exp.JSONExtractScalar: _json_extract_sql,
exp.JSONFormat: rename_func("TO_JSON_STRING"),
exp.Levenshtein: _levenshtein_sql,
exp.Max: max_or_greatest,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 5e8e1dcc..842df5a7 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -658,6 +658,7 @@ class Generator(metaclass=_Generator):
"_next_name",
"_identifier_start",
"_identifier_end",
+ "_quote_json_path_key_using_brackets",
)
def __init__(
@@ -706,6 +707,8 @@ class Generator(metaclass=_Generator):
self._identifier_start = self.dialect.IDENTIFIER_START
self._identifier_end = self.dialect.IDENTIFIER_END
+ self._quote_json_path_key_using_brackets = True
+
def generate(self, expression: exp.Expression, copy: bool = True) -> str:
"""
Generates the SQL string corresponding to the given syntax tree.
@@ -2871,7 +2874,7 @@ class Generator(metaclass=_Generator):
if isinstance(expression, int):
return str(expression)
- if self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
+ if self._quote_json_path_key_using_brackets and self.JSON_PATH_SINGLE_QUOTE_ESCAPE:
escaped = expression.replace("'", "\\'")
escaped = f"\\'{expression}\\'"
else:
@@ -4072,7 +4075,11 @@ class Generator(metaclass=_Generator):
return f".{this}"
this = self.json_path_part(this)
- return f"[{this}]" if self.JSON_PATH_BRACKETED_KEY_SUPPORTED else f".{this}"
+ return (
+ f"[{this}]"
+ if self._quote_json_path_key_using_brackets and self.JSON_PATH_BRACKETED_KEY_SUPPORTED
+ else f".{this}"
+ )
def _jsonpathsubscript_sql(self, expression: exp.JSONPathSubscript) -> str:
this = self.json_path_part(expression.this)
diff --git a/sqlglot/jsonpath.py b/sqlglot/jsonpath.py
index 911debe4..115bd159 100644
--- a/sqlglot/jsonpath.py
+++ b/sqlglot/jsonpath.py
@@ -146,6 +146,28 @@ def parse(path: str, dialect: DialectType = None) -> exp.JSONPath:
return node
+ def _parse_var_text() -> str:
+ """
+ Consumes & returns the text for a var. In BigQuery it's valid to have a key with spaces
+ in it, e.g JSON_QUERY(..., '$. a b c ') should produce a single JSONPathKey(' a b c ').
+ This is done by merging "consecutive" vars until a key separator is found (dot, colon etc)
+ or the path string is exhausted.
+ """
+ prev_index = i - 2
+
+ while _match(TokenType.VAR):
+ pass
+
+ start = 0 if prev_index < 0 else tokens[prev_index].end + 1
+
+ if i >= len(tokens):
+ # This key is the last token for the path, so it's text is the remaining path
+ text = path[start:]
+ else:
+ text = path[start : tokens[i].start]
+
+ return text
+
# We canonicalize the JSON path AST so that it always starts with a
# "root" element, so paths like "field" will be generated as "$.field"
_match(TokenType.DOLLAR)
@@ -155,8 +177,10 @@ def parse(path: str, dialect: DialectType = None) -> exp.JSONPath:
if _match(TokenType.DOT) or _match(TokenType.COLON):
recursive = _prev().text == ".."
- if _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
- value: t.Optional[str | exp.JSONPathWildcard] = _prev().text
+ if _match(TokenType.VAR):
+ value: t.Optional[str | exp.JSONPathWildcard] = _parse_var_text()
+ elif _match(TokenType.IDENTIFIER):
+ value = _prev().text
elif _match(TokenType.STAR):
value = exp.JSONPathWildcard()
else:
@@ -170,7 +194,9 @@ def parse(path: str, dialect: DialectType = None) -> exp.JSONPath:
raise ParseError(_error("Expected key name or * after DOT"))
elif _match(TokenType.L_BRACKET):
expressions.append(_parse_bracket())
- elif _match(TokenType.VAR) or _match(TokenType.IDENTIFIER):
+ elif _match(TokenType.VAR):
+ expressions.append(exp.JSONPathKey(this=_parse_var_text()))
+ elif _match(TokenType.IDENTIFIER):
expressions.append(exp.JSONPathKey(this=_prev().text))
elif _match(TokenType.STAR):
expressions.append(exp.JSONPathWildcard())
| bigquery: json_value transpilation gives incorrect result or fails due to escaping
noticed via sqlmesh, which recently updated `sqlglot (25.19.0 -> 25.31.4)`.
We have an expression like `select json_value('{"fu bar": 42}', "$.fu bar")` which runs correctly on bigquery yielding 42. Previously this was transpiled unchanged, but now results in an incorrect selection (note the added `.`s):
```
>>> sqlglot.__version__
'25.31.4'
>>> print(sqlglot.transpile("""select json_value('{"fu bar": 42}', "$.fu bar")""", read="bigquery", write="bigquery")[0])
SELECT json_value('{"fu bar": 42}', '$.fu.bar')
```
This query now yields `null` instead of 42 in bigquery.
The space-containing-key can be escaped in bigquery with double-quotes for [json_value](https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_value) which is apparently different from the bracketed escaping used by [json_extract](https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_extract)... 🤯 So this also works successfully in bigquery: `SELECT json_value('{"fu bar": 42}', '$."fu bar"')`.
But it now gets transpiled to sql which throws an error, namely `Invalid token in JSONPath at: ['fu bar']`:
```
>>> print(sqlglot.transpile("""SELECT json_value('{"fu bar": 42}', '$."fu bar"')""", read="bigquery", write="bigquery")[0])
SELECT json_value('{"fu bar": 42}', '$[\'fu bar\']')
```
a workaround is apparently to use the deprecated json_extract[_scalar] functions. i.e. the query `SELECT json_extract('{"fu bar": 42}', '$[\'fu bar\']')` works correctly and is transpiled unchanged:
```
>>> print(sqlglot.transpile("""SELECT json_extract('{"fu bar": 42}', '$[\\'fu bar\\']')""", read="bigquery", write="bigquery")[0])
SELECT json_extract('{"fu bar": 42}', '$[\'fu bar\']')
```
| tobymao/sqlglot | diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 366caded..26b12a15 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -1574,14 +1574,6 @@ WHERE
"snowflake": "IFF((y) <> 0, (x) / (y), NULL)",
},
)
- self.validate_all(
- """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
- write={
- "bigquery": """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
- "duckdb": """SELECT '{"class": {"students": []}}' -> '$.class'""",
- "snowflake": """SELECT GET_PATH(PARSE_JSON('{"class": {"students": []}}'), 'class')""",
- },
- )
self.validate_all(
"""SELECT JSON_VALUE_ARRAY('{"arr": [1, "a"]}', '$.arr')""",
write={
@@ -2139,7 +2131,16 @@ OPTIONS (
},
)
- def test_json_extract_scalar(self):
+ def test_json_extract(self):
+ self.validate_all(
+ """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
+ write={
+ "bigquery": """SELECT JSON_QUERY('{"class": {"students": []}}', '$.class')""",
+ "duckdb": """SELECT '{"class": {"students": []}}' -> '$.class'""",
+ "snowflake": """SELECT GET_PATH(PARSE_JSON('{"class": {"students": []}}'), 'class')""",
+ },
+ )
+
for func in ("JSON_EXTRACT_SCALAR", "JSON_VALUE"):
with self.subTest(f"Testing BigQuery's {func}"):
self.validate_all(
@@ -2164,6 +2165,18 @@ OPTIONS (
self.parse_one(sql).sql("bigquery", normalize_functions="upper"), sql
)
+ # Test double quote escaping
+ for func in ("JSON_VALUE", "JSON_QUERY", "JSON_QUERY_ARRAY"):
+ self.validate_identity(
+ f"{func}(doc, '$. a b c .d')", f"""{func}(doc, '$." a b c ".d')"""
+ )
+
+ # Test single quote & bracket escaping
+ for func in ("JSON_EXTRACT", "JSON_EXTRACT_SCALAR", "JSON_EXTRACT_ARRAY"):
+ self.validate_identity(
+ f"{func}(doc, '$. a b c .d')", f"""{func}(doc, '$[\\' a b c \\'].d')"""
+ )
+
def test_json_extract_array(self):
for func in ("JSON_QUERY_ARRAY", "JSON_EXTRACT_ARRAY"):
with self.subTest(f"Testing BigQuery's {func}"):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 25.32 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@954d8fd12740071e0951d1df3a405a4b9634868d#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- distlib==0.3.9
- duckdb==1.2.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract"
] | [] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_bigquery.py::TestBigQuery::test_convert",
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_format_temporal",
"tests/dialects/test_bigquery.py::TestBigQuery::test_gap_fill",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_inline_constructor",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract_array",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_mod",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_null_ordering",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_range_type",
"tests/dialects/test_bigquery.py::TestBigQuery::test_regexp_extract",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unix_seconds",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unnest",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings"
] | [] | MIT License | 20,323 | 1,639 | [
"sqlglot/dialects/bigquery.py",
"sqlglot/generator.py",
"sqlglot/jsonpath.py"
] |
|
PrefectHQ__prefect-16117 | fb919c67427d230ecea03a5273dfdd6de59bd697 | 2024-11-26 14:31:52 | fb919c67427d230ecea03a5273dfdd6de59bd697 | diff --git a/src/prefect/flows.py b/src/prefect/flows.py
index 27dc6e7c4a..cc4f781b64 100644
--- a/src/prefect/flows.py
+++ b/src/prefect/flows.py
@@ -10,6 +10,7 @@ import datetime
import importlib.util
import inspect
import os
+import sys
import tempfile
import warnings
from functools import partial, update_wrapper
@@ -137,6 +138,16 @@ logger = get_logger("flows")
if TYPE_CHECKING:
from prefect.deployments.runner import FlexibleScheduleList, RunnerDeployment
+# Handle Python 3.8 compatibility for GenericAlias
+if sys.version_info >= (3, 9):
+ from types import GenericAlias # novermin
+
+ GENERIC_ALIAS = (GenericAlias,)
+else:
+ from typing import _GenericAlias
+
+ GENERIC_ALIAS = (_GenericAlias,)
+
@PrefectObjectRegistry.register_instances
class Flow(Generic[P, R]):
@@ -530,18 +541,22 @@ class Flow(Generic[P, R]):
is_v1_type(param.annotation) for param in sig.parameters.values()
)
has_v1_models = any(
- issubclass(param.annotation, V1BaseModel)
- if isinstance(param.annotation, type)
- else False
+ (
+ isinstance(param.annotation, type)
+ and not isinstance(param.annotation, GENERIC_ALIAS)
+ and issubclass(param.annotation, V1BaseModel)
+ )
for param in sig.parameters.values()
)
has_v2_types = any(
is_v2_type(param.annotation) for param in sig.parameters.values()
)
has_v2_models = any(
- issubclass(param.annotation, V2BaseModel)
- if isinstance(param.annotation, type)
- else False
+ (
+ isinstance(param.annotation, type)
+ and not isinstance(param.annotation, GENERIC_ALIAS)
+ and issubclass(param.annotation, V2BaseModel)
+ )
for param in sig.parameters.values()
)
@@ -1601,7 +1616,9 @@ flow.from_source = Flow.from_source
def select_flow(
- flows: Iterable[Flow], flow_name: str = None, from_message: str = None
+ flows: Iterable[Flow],
+ flow_name: Optional[str] = None,
+ from_message: Optional[str] = None,
) -> Flow:
"""
Select the only flow in an iterable or a flow specified by name.
| flow.validate_parameters regression
### Bug summary
Certain flow deployments that worked fine before cannot be executed anymore after updating from Prefect 2.20.9 to any newer Prefect version.
This is due to a change in the parameter validation.
MRE:
```python
from prefect import flow
@flow
def my_flow(param: dict[str, str]):
pass
flow_run_params = {
"param": {'foo': 'bar'}
}
my_flow.validate_parameters(flow_run_params)
```
runs fine for Prefect <= 2.20.9.
But raises
```python
Traceback (most recent call last):
File "mre.py", line 13, in <module>
my_flow.validate_parameters(flow_run_params)
File "mre.py", line 532, in validate_parameters
has_v1_models = any(
File "mre.py", line 533, in <genexpr>
issubclass(param.annotation, V1BaseModel)
File "/usr/lib/python3.10/abc.py", line 123, in __subclasscheck__
return _abc_subclasscheck(cls, subclass)
TypeError: issubclass() arg 1 must be a class
```
for Prefect >= 2.20.10
### Version info
```Text
Version: 2.20.14
API version: 0.8.4
Python version: 3.10.12
Git commit: fb919c67
Built: Mon, Nov 18, 2024 4:41 PM
OS/Arch: linux/x86_64
Server type: cloud
```
### Additional context
related to https://github.com/PrefectHQ/prefect/pull/15608 | PrefectHQ/prefect | diff --git a/tests/test_flows.py b/tests/test_flows.py
index 47bfea76ec..097018f975 100644
--- a/tests/test_flows.py
+++ b/tests/test_flows.py
@@ -1517,6 +1517,23 @@ class TestFlowParameterTypes:
"secret": SecretStr("my secret")
}
+ @pytest.mark.skipif(
+ sys.version_info < (3, 9), reason="Python 3.9+ required for GenericAlias"
+ )
+ def test_flow_signature_can_contain_generic_type_hints(self):
+ """Test that generic type hints like dict[str, str] work correctly
+
+ this is a regression test for https://github.com/PrefectHQ/prefect/issues/16105
+ """
+
+ @flow
+ def my_flow(param: dict[str, str]): # novermin
+ return param
+
+ test_data = {"foo": "bar"}
+ assert my_flow(test_data) == test_data
+ assert my_flow.validate_parameters({"param": test_data}) == {"param": test_data}
+
class TestSubflowTaskInputs:
async def test_subflow_with_one_upstream_task_future(self, prefect_client):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiosqlite==0.21.0
alembic==1.15.2
annotated-types==0.7.0
anyio==4.6.0
apprise==1.9.3
asgi-lifespan==2.1.0
asttokens==3.0.0
async-timeout==5.0.1
asyncpg==0.29.0
attrs==25.3.0
babel==2.17.0
backrefs==5.8
boto3==1.37.23
botocore==1.37.23
cachetools==5.5.2
cairocffi==1.7.1
CairoSVG==2.7.1
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
cloudpickle==3.1.1
codespell==2.4.1
colorama==0.4.6
coolname==2.2.0
coverage==7.8.0
croniter==5.0.1
cryptography==44.0.2
cssselect2==0.8.0
dateparser==1.2.1
decorator==5.2.1
defusedxml==0.7.1
distlib==0.3.9
dnspython==2.7.0
docker==7.1.0
durationpy==0.9
email_validator==2.2.0
exceptiongroup==1.2.2
execnet==2.1.1
executing==2.2.0
filelock==3.18.0
fsspec==2025.3.1
ghp-import==2.1.0
google-auth==2.38.0
graphviz==0.20.3
greenlet==3.1.1
griffe==1.7.1
h11==0.14.0
h2==4.2.0
hpack==4.1.0
httpcore==1.0.7
httpx==0.28.1
humanize==4.12.2
hyperframe==6.1.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
importlib_resources==6.4.5
iniconfig==2.1.0
ipython==8.18.1
itsdangerous==2.2.0
jedi==0.19.2
Jinja2==3.1.6
jinja2-humanize-extension==0.4.0
jmespath==1.0.1
jsonpatch==1.33
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kubernetes==31.0.0
Mako==1.3.9
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mdurl==0.1.2
mergedeep==1.3.4
mike==2.1.3
mkdocs==1.6.1
mkdocs-autorefs==1.4.1
mkdocs-gen-files==0.5.0
mkdocs-get-deps==0.2.0
mkdocs-material==9.6.10
mkdocs-material-extensions==1.3.1
mkdocstrings==0.29.1
mkdocstrings-python==1.16.8
moto==5.1.2
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
oauthlib==3.2.2
orjson==3.10.16
packaging==24.2
paginate==0.5.7
parso==0.8.4
pathspec==0.12.1
pendulum==2.1.2
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
-e git+https://github.com/PrefectHQ/prefect.git@fb919c67427d230ecea03a5273dfdd6de59bd697#egg=prefect
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
py-cpuinfo==9.0.0
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
Pygments==2.19.1
pymdown-extensions==10.14.3
pyparsing==3.2.3
pytest==8.3.5
pytest-asyncio==0.21.2
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-flakefinder==1.1.0
pytest-mock==3.14.0
pytest-timeout==2.3.1
pytest-xdist==3.3.1
python-dateutil==2.9.0.post0
python-multipart==0.0.20
python-slugify==8.0.4
pytkdocs==0.16.5
pytz==2024.2
pytzdata==2020.1
PyYAML==6.0.2
pyyaml_env_tag==0.1
readchar==4.2.1
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
requests-oauthlib==2.0.0
responses==0.25.7
respx==0.22.0
rfc3339-validator==0.1.4
rich==13.9.4
rpds-py==0.24.0
rsa==4.9
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
ruff==0.11.2
s3transfer==0.11.4
shellingham==1.5.4
six==1.17.0
sniffio==1.3.1
SQLAlchemy==2.0.35
stack-data==0.6.3
text-unidecode==1.3
tinycss2==1.4.0
toml==0.10.2
tomli==2.2.1
traitlets==5.14.3
typer==0.13.1
types-cachetools==5.5.0.20240820
types-PyYAML==6.0.12.20250326
typing-inspection==0.4.0
typing_extensions==4.13.0
tzlocal==5.3.1
ujson==5.10.0
urllib3==1.26.20
uvicorn==0.34.0
vermin==1.6.0
verspec==0.1.0
virtualenv==20.29.3
watchdog==6.0.0
watchfiles==1.0.4
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.8.0
websockets==13.1
Werkzeug==3.1.3
xmltodict==0.14.2
zipp==3.21.0
| name: prefect
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiosqlite==0.21.0
- alembic==1.15.2
- annotated-types==0.7.0
- anyio==4.6.0
- apprise==1.9.3
- asgi-lifespan==2.1.0
- asttokens==3.0.0
- async-timeout==5.0.1
- asyncpg==0.29.0
- attrs==25.3.0
- babel==2.17.0
- backrefs==5.8
- boto3==1.37.23
- botocore==1.37.23
- cachetools==5.5.2
- cairocffi==1.7.1
- cairosvg==2.7.1
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- cloudpickle==3.1.1
- codespell==2.4.1
- colorama==0.4.6
- coolname==2.2.0
- coverage==7.8.0
- croniter==5.0.1
- cryptography==44.0.2
- cssselect2==0.8.0
- dateparser==1.2.1
- decorator==5.2.1
- defusedxml==0.7.1
- distlib==0.3.9
- dnspython==2.7.0
- docker==7.1.0
- durationpy==0.9
- email-validator==2.2.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- executing==2.2.0
- filelock==3.18.0
- fsspec==2025.3.1
- ghp-import==2.1.0
- google-auth==2.38.0
- graphviz==0.20.3
- greenlet==3.1.1
- griffe==1.7.1
- h11==0.14.0
- h2==4.2.0
- hpack==4.1.0
- httpcore==1.0.7
- httpx==0.28.1
- humanize==4.12.2
- hyperframe==6.1.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- importlib-resources==6.4.5
- iniconfig==2.1.0
- ipython==8.18.1
- itsdangerous==2.2.0
- jedi==0.19.2
- jinja2==3.1.6
- jinja2-humanize-extension==0.4.0
- jmespath==1.0.1
- jsonpatch==1.33
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- kubernetes==31.0.0
- mako==1.3.9
- markdown==3.7
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mdurl==0.1.2
- mergedeep==1.3.4
- mike==2.1.3
- mkdocs==1.6.1
- mkdocs-autorefs==1.4.1
- mkdocs-gen-files==0.5.0
- mkdocs-get-deps==0.2.0
- mkdocs-material==9.6.10
- mkdocs-material-extensions==1.3.1
- mkdocstrings==0.29.1
- mkdocstrings-python==1.16.8
- moto==5.1.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- oauthlib==3.2.2
- orjson==3.10.16
- packaging==24.2
- paginate==0.5.7
- parso==0.8.4
- pathspec==0.12.1
- pendulum==2.1.2
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- py-cpuinfo==9.0.0
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pygments==2.19.1
- pymdown-extensions==10.14.3
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-asyncio==0.21.2
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-flakefinder==1.1.0
- pytest-mock==3.14.0
- pytest-timeout==2.3.1
- pytest-xdist==3.3.1
- python-dateutil==2.9.0.post0
- python-multipart==0.0.20
- python-slugify==8.0.4
- pytkdocs==0.16.5
- pytz==2024.2
- pytzdata==2020.1
- pyyaml==6.0.2
- pyyaml-env-tag==0.1
- readchar==4.2.1
- referencing==0.36.2
- regex==2024.11.6
- requests==2.32.3
- requests-oauthlib==2.0.0
- responses==0.25.7
- respx==0.22.0
- rfc3339-validator==0.1.4
- rich==13.9.4
- rpds-py==0.24.0
- rsa==4.9
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- ruff==0.11.2
- s3transfer==0.11.4
- shellingham==1.5.4
- six==1.17.0
- sniffio==1.3.1
- sqlalchemy==2.0.35
- stack-data==0.6.3
- text-unidecode==1.3
- tinycss2==1.4.0
- toml==0.10.2
- tomli==2.2.1
- traitlets==5.14.3
- typer==0.13.1
- types-cachetools==5.5.0.20240820
- types-pyyaml==6.0.12.20250326
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- tzlocal==5.3.1
- ujson==5.10.0
- urllib3==1.26.20
- uvicorn==0.34.0
- vermin==1.6.0
- verspec==0.1.0
- virtualenv==20.29.3
- watchdog==6.0.0
- watchfiles==1.0.4
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.8.0
- websockets==13.1
- werkzeug==3.1.3
- xmltodict==0.14.2
- zipp==3.21.0
prefix: /opt/conda/envs/prefect
| [
"tests/test_flows.py::TestFlowParameterTypes::test_flow_signature_can_contain_generic_type_hints"
] | [] | [
"tests/test_flows.py::test_flow",
"tests/test_flows.py::TestFlow::test_initializes",
"tests/test_flows.py::TestFlow::test_initializes_with_callable_flow_run_name",
"tests/test_flows.py::TestFlow::test_initializes_with_default_version",
"tests/test_flows.py::TestFlow::test_version_none_if_source_file_cannot_be_determined[None]",
"tests/test_flows.py::TestFlow::test_version_none_if_source_file_cannot_be_determined[<stdin>]",
"tests/test_flows.py::TestFlow::test_version_none_if_source_file_cannot_be_determined[<ipython-input-1-d31e8a6792d4>]",
"tests/test_flows.py::TestFlow::test_raises_on_bad_funcs",
"tests/test_flows.py::TestFlow::test_default_description_is_from_docstring",
"tests/test_flows.py::TestFlow::test_default_name_is_from_function",
"tests/test_flows.py::TestFlow::test_raises_clear_error_when_not_compatible_with_validator",
"tests/test_flows.py::TestFlow::test_invalid_name[my/flow]",
"tests/test_flows.py::TestFlow::test_invalid_name[my%flow]",
"tests/test_flows.py::TestFlow::test_invalid_name[my<flow]",
"tests/test_flows.py::TestFlow::test_invalid_name[my>flow]",
"tests/test_flows.py::TestFlow::test_invalid_name[my&flow]",
"tests/test_flows.py::TestFlow::test_invalid_run_name",
"tests/test_flows.py::TestFlow::test_using_return_state_in_flow_definition_raises_reserved",
"tests/test_flows.py::TestFlow::test_param_description_from_docstring",
"tests/test_flows.py::TestDecorator::test_flow_decorator_initializes",
"tests/test_flows.py::TestDecorator::test_flow_decorator_initializes_with_callable_flow_run_name",
"tests/test_flows.py::TestDecorator::test_flow_decorator_sets_default_version",
"tests/test_flows.py::TestDecorator::test_invalid_run_name",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_allows_override_of_flow_settings",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_uses_existing_settings_when_no_override",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_can_unset_timeout_seconds_with_zero",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_can_unset_retries_with_zero",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_can_unset_retry_delay_seconds_with_zero",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_uses_parent_flow_run_name_if_not_provided",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_can_unset_result_options_with_none",
"tests/test_flows.py::TestFlowWithOptions::test_with_options_signature_aligns_with_flow_signature",
"tests/test_flows.py::TestFlowWithOptions::test_flow_name_non_string_raises[1-Expected",
"tests/test_flows.py::TestFlowWithOptions::test_flow_name_non_string_raises[get_flow_run_name-Expected",
"tests/test_flows.py::TestFlowWithOptions::test_flow_name_string_succeeds[test]",
"tests/test_flows.py::TestFlowWithOptions::test_flow_name_string_succeeds[test-todays_date]",
"tests/test_flows.py::TestFlowCall::test_call_creates_flow_run_and_runs",
"tests/test_flows.py::TestFlowCall::test_async_call_creates_flow_run_and_runs",
"tests/test_flows.py::TestFlowCall::test_call_with_return_state_true",
"tests/test_flows.py::TestFlowCall::test_call_coerces_parameter_types",
"tests/test_flows.py::TestFlowCall::test_call_with_variadic_args",
"tests/test_flows.py::TestFlowCall::test_call_with_variadic_keyword_args",
"tests/test_flows.py::TestFlowCall::test_fails_but_does_not_raise_on_incompatible_parameter_types",
"tests/test_flows.py::TestFlowCall::test_call_ignores_incompatible_parameter_types_if_asked",
"tests/test_flows.py::TestFlowCall::test_final_state_reflects_exceptions_during_run[error0]",
"tests/test_flows.py::TestFlowCall::test_final_state_reflects_exceptions_during_run[None]",
"tests/test_flows.py::TestFlowCall::test_final_state_respects_returned_state",
"tests/test_flows.py::TestFlowCall::test_flow_state_reflects_returned_task_run_state",
"tests/test_flows.py::TestFlowCall::test_flow_state_defaults_to_task_states_when_no_return_failure",
"tests/test_flows.py::TestFlowCall::test_flow_state_defaults_to_task_states_when_no_return_completed",
"tests/test_flows.py::TestFlowCall::test_flow_state_default_includes_subflow_states",
"tests/test_flows.py::TestFlowCall::test_flow_state_default_handles_nested_failures",
"tests/test_flows.py::TestFlowCall::test_flow_state_reflects_returned_multiple_task_run_states",
"tests/test_flows.py::TestFlowCall::test_call_execution_blocked_does_not_run_flow",
"tests/test_flows.py::TestFlowCall::test_flow_can_end_in_paused_state",
"tests/test_flows.py::TestFlowCall::test_flow_can_end_in_cancelled_state",
"tests/test_flows.py::TestFlowCall::test_flow_state_with_cancelled_tasks_has_cancelled_state",
"tests/test_flows.py::TestFlowCall::test_flow_with_cancelled_subflow_has_cancelled_state",
"tests/test_flows.py::TestSubflowCalls::test_subflow_call_with_no_tasks",
"tests/test_flows.py::TestSubflowCalls::test_subflow_call_with_returned_task",
"tests/test_flows.py::TestSubflowCalls::test_async_flow_with_async_subflow_and_async_task",
"tests/test_flows.py::TestSubflowCalls::test_async_flow_with_async_subflow_and_sync_task",
"tests/test_flows.py::TestSubflowCalls::test_async_flow_with_sync_subflow_and_sync_task",
"tests/test_flows.py::TestSubflowCalls::test_sync_flow_with_async_subflow",
"tests/test_flows.py::TestSubflowCalls::test_sync_flow_with_async_subflow_and_async_task",
"tests/test_flows.py::TestSubflowCalls::test_concurrent_async_subflow",
"tests/test_flows.py::TestSubflowCalls::test_recursive_async_subflow",
"tests/test_flows.py::TestSubflowCalls::test_recursive_sync_subflow",
"tests/test_flows.py::TestSubflowCalls::test_recursive_sync_flow",
"tests/test_flows.py::TestSubflowCalls::test_subflow_with_invalid_parameters_is_failed",
"tests/test_flows.py::TestSubflowCalls::test_subflow_with_invalid_parameters_fails_parent",
"tests/test_flows.py::TestSubflowCalls::test_subflow_with_invalid_parameters_is_not_failed_without_validation",
"tests/test_flows.py::TestSubflowCalls::test_subflow_relationship_tracking",
"tests/test_flows.py::TestSubflowCalls::test_sync_flow_with_async_subflow_and_task_that_awaits_result",
"tests/test_flows.py::TestFlowRunTags::test_flow_run_tags_added_at_call",
"tests/test_flows.py::TestFlowRunTags::test_flow_run_tags_added_to_subflows",
"tests/test_flows.py::TestFlowTimeouts::test_flows_fail_with_timeout",
"tests/test_flows.py::TestFlowTimeouts::test_async_flows_fail_with_timeout",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_only_applies_if_exceeded",
"tests/test_flows.py::TestFlowTimeouts::test_user_timeout_is_not_hidden",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_does_not_wait_for_completion_for_sync_flows",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_stops_execution_at_next_task_for_sync_flows",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_stops_execution_after_await_for_async_flows",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_stops_execution_in_async_subflows",
"tests/test_flows.py::TestFlowTimeouts::test_timeout_stops_execution_in_sync_subflows",
"tests/test_flows.py::TestFlowTimeouts::test_subflow_timeout_waits_until_execution_starts",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_unserializable_types",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_pydantic_types",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[data0]",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[data1]",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[data2]",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[1]",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[foo]",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_jsonable_python_types[ParameterTestEnum.X]",
"tests/test_flows.py::TestFlowParameterTypes::test_type_container_flow_inputs",
"tests/test_flows.py::TestFlowParameterTypes::test_subflow_parameters_can_be_unserializable_types",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameters_can_be_unserializable_types_that_raise_value_error",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_parameter_annotations_can_be_non_pydantic_classes",
"tests/test_flows.py::TestFlowParameterTypes::test_subflow_parameters_can_be_pydantic_types",
"tests/test_flows.py::TestFlowParameterTypes::test_subflow_parameters_from_future_can_be_unserializable_types",
"tests/test_flows.py::TestFlowParameterTypes::test_subflow_parameters_can_be_pydantic_models_from_task_future",
"tests/test_flows.py::TestFlowParameterTypes::test_subflow_parameter_annotations_can_be_normal_classes",
"tests/test_flows.py::TestFlowParameterTypes::test_flow_signature_can_contain_pydantic_v1_SecretStr",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_one_upstream_task_future",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_one_upstream_task_state",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_one_upstream_task_result",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_one_upstream_task_future_and_allow_failure",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_one_upstream_task_state_and_allow_failure",
"tests/test_flows.py::TestSubflowTaskInputs::test_subflow_with_no_upstream_tasks",
"tests/test_flows.py::TestFlowRunLogs::test_user_logs_are_sent_to_orion",
"tests/test_flows.py::TestFlowRunLogs::test_repeated_flow_calls_send_logs_to_orion",
"tests/test_flows.py::TestFlowRunLogs::test_exception_info_is_included_in_log",
"tests/test_flows.py::TestFlowRunLogs::test_raised_exceptions_include_tracebacks",
"tests/test_flows.py::TestFlowRunLogs::test_opt_out_logs_are_not_sent_to_api",
"tests/test_flows.py::TestFlowRunLogs::test_logs_are_given_correct_id",
"tests/test_flows.py::TestSubflowRunLogs::test_subflow_logs_are_written_correctly",
"tests/test_flows.py::TestSubflowRunLogs::test_subflow_logs_are_written_correctly_with_tasks",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_successful_task",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_no_error_in_flow_and_one_failed_task",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_one_failed_task",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_no_error_in_flow_and_one_failed_child_flow",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_one_successful_child_flow",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_one_failed_child_flow",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_failed_child_flow_with_failed_task",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_one_failed_task_with_retries",
"tests/test_flows.py::TestFlowRetries::test_flow_retry_with_error_in_flow_and_one_failed_task_with_retries_cannot_exceed_retries",
"tests/test_flows.py::TestFlowRetries::test_flow_with_failed_child_flow_with_retries",
"tests/test_flows.py::TestFlowRetries::test_parent_flow_retries_failed_child_flow_with_retries",
"tests/test_flows.py::TestFlowRetries::test_global_retry_config",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_load_flow_from_entrypoint",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_load_flow_from_entrypoint_with_absolute_path",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_load_flow_from_entrypoint_with_module_path",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_load_flow_from_entrypoint_script_error_loads_placeholder",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_handling_script_with_unprotected_call_in_flow_script",
"tests/test_flows.py::TestLoadFlowFromEntrypoint::test_load_flow_from_entrypoint_with_use_placeholder_flow",
"tests/test_flows.py::TestFlowRunName::test_invalid_runtime_run_name",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_when_provided",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_with_params_including_defaults",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_with_function",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_with_function_using_runtime_context",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_with_function_not_returning_string",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_once",
"tests/test_flows.py::TestFlowRunName::test_sets_run_name_once_per_call",
"tests/test_flows.py::TestFlowHooksWithKwargs::test_hook_with_extra_default_arg",
"tests/test_flows.py::TestFlowHooksWithKwargs::test_hook_with_bound_kwargs",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_noniterable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_empty_hook_list_raises",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_callable_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_run_on_completed",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_dont_run_on_failure",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_other_completion_hooks_run_if_a_hook_fails",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_work_with_sync_and_async[create_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_work_with_sync_and_async[create_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_work_with_sync_and_async[create_async_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCompletion::test_on_completion_hooks_work_with_sync_and_async[create_async_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnFailure::test_noniterable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnFailure::test_empty_hook_list_raises",
"tests/test_flows.py::TestFlowHooksOnFailure::test_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnFailure::test_callable_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_run_on_failure",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_dont_run_on_completed",
"tests/test_flows.py::TestFlowHooksOnFailure::test_other_failure_hooks_run_if_a_hook_fails",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_work_with_sync_and_async[create_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_work_with_sync_and_async[create_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_work_with_sync_and_async[create_async_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_work_with_sync_and_async[create_async_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnFailure::test_on_failure_hooks_run_on_bad_parameters",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_noniterable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_empty_hook_list_raises",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_callable_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_run_on_cancelled_state",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_are_ignored_if_terminal_state_completed",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_are_ignored_if_terminal_state_failed",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_other_cancellation_hooks_run_if_one_hook_fails",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancelled_hook_on_subflow_succeeds",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_work_with_sync_and_async[create_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_work_with_sync_and_async[create_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_work_with_sync_and_async[create_async_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_work_with_sync_and_async[create_async_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hook_called_on_sigterm_from_flow_with_cancelling_state",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hook_not_called_on_sigterm_from_flow_without_cancelling_state",
"tests/test_flows.py::TestFlowHooksOnCancellation::test_on_cancellation_hooks_respect_env_var",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_noniterable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_empty_hook_list_raises",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_callable_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_run_on_crashed_state",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_are_ignored_if_terminal_state_completed",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_are_ignored_if_terminal_state_failed",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_other_crashed_hooks_run_if_one_hook_fails",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_work_with_sync_and_async[create_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_work_with_sync_and_async[create_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_work_with_sync_and_async[create_async_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_work_with_sync_and_async[create_async_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hook_on_subflow_succeeds",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hook_called_on_sigterm_from_flow_without_cancelling_state",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hook_not_called_on_sigterm_from_flow_with_cancelling_state",
"tests/test_flows.py::TestFlowHooksOnCrashed::test_on_crashed_hooks_respect_env_var",
"tests/test_flows.py::TestFlowHooksOnRunning::test_noniterable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnRunning::test_empty_hook_list_raises",
"tests/test_flows.py::TestFlowHooksOnRunning::test_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnRunning::test_callable_noncallable_hook_raises",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_run_on_running",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_run_on_failure",
"tests/test_flows.py::TestFlowHooksOnRunning::test_other_running_hooks_run_if_a_hook_fails",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_work_with_sync_and_async[create_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_work_with_sync_and_async[create_hook-create_async_hook]",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_work_with_sync_and_async[create_async_hook-create_hook]",
"tests/test_flows.py::TestFlowHooksOnRunning::test_on_running_hooks_work_with_sync_and_async[create_async_hook-create_async_hook]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_returns_runner_deployment",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_accepts_interval",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_can_produce_a_module_path_entrypoint",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_accepts_cron",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_accepts_rrule",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_invalid_name_raises",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs0]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs1]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs2]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs3]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs4]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_raises_on_multiple_schedule_parameters[kwargs5]",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_respects_with_options_name_from_flow",
"tests/test_flows.py::TestFlowToDeployment::test_to_deployment_respects_with_options_name_from_storage",
"tests/test_flows.py::TestFlowServe::test_serve_prints_message",
"tests/test_flows.py::TestFlowServe::test_serve_creates_deployment",
"tests/test_flows.py::TestFlowServe::test_serve_can_user_a_module_path_entrypoint",
"tests/test_flows.py::TestFlowServe::test_serve_handles__file__",
"tests/test_flows.py::TestFlowServe::test_serve_creates_deployment_with_interval_schedule",
"tests/test_flows.py::TestFlowServe::test_serve_creates_deployment_with_cron_schedule",
"tests/test_flows.py::TestFlowServe::test_serve_creates_deployment_with_rrule_schedule",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs0]",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs1]",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs2]",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs3]",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs4]",
"tests/test_flows.py::TestFlowServe::test_serve_raises_on_multiple_schedules[kwargs5]",
"tests/test_flows.py::TestFlowServe::test_serve_starts_a_runner",
"tests/test_flows.py::TestFlowServe::test_serve_passes_limit_specification_to_runner",
"tests/test_flows.py::TestFlowFromSource::test_load_flow_from_source_with_storage",
"tests/test_flows.py::TestFlowFromSource::test_loaded_flow_to_deployment_has_storage",
"tests/test_flows.py::TestFlowFromSource::test_loaded_flow_can_be_updated_with_options",
"tests/test_flows.py::TestFlowFromSource::test_load_flow_from_source_with_url",
"tests/test_flows.py::TestFlowFromSource::test_accepts_storage_blocks",
"tests/test_flows.py::TestFlowFromSource::test_raises_on_unsupported_type",
"tests/test_flows.py::TestFlowFromSource::test_load_flow_from_source_on_flow_function",
"tests/test_flows.py::TestFlowDeploy::test_calls_deploy_with_expected_args",
"tests/test_flows.py::TestFlowDeploy::test_calls_deploy_with_expected_args_remote_flow",
"tests/test_flows.py::TestFlowDeploy::test_deploy_non_existent_work_pool",
"tests/test_flows.py::TestFlowDeploy::test_no_worker_command_for_push_pool",
"tests/test_flows.py::TestFlowDeploy::test_suppress_console_output",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_name_from_entrypoint",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_name_from_entrypoint_no_name",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_async_flow_from_entrypoint_no_name",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_name_from_entrypoint_dynamic_name_fstring",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_name_from_entrypoint_dyanmic_name_function",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_name_from_entrypoint_dynamic_name_depends_on_missing_import",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_description_from_entrypoint",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_flow_description_from_entrypoint_no_description",
"tests/test_flows.py::TestLoadFlowArgumentFromEntrypoint::test_load_no_flow",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_flow_not_found",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_basic_operation",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_get_parameter_schema_from_safe_loaded_flow",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_dynamic_name_fstring",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_dynamic_name_function",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_dynamic_name_depends_on_missing_import",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_annotations_and_defaults_rely_on_imports",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_annotations_rely_on_missing_import",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_defaults_rely_on_missing_import",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_function_with_enum_argument",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_handles_dynamically_created_models",
"tests/test_flows.py::TestSafeLoadFlowFromEntrypoint::test_raises_name_error_when_loaded_flow_cannot_run"
] | [] | Apache License 2.0 | 20,325 | 574 | [
"src/prefect/flows.py"
] |
|
tobymao__sqlglot-4448 | 74dc39ba8649fd8292c97c82088b39b08f531702 | 2024-11-26 14:58:43 | 3945acc4a0dfd58147de929c9a2c71734d8f1ade | diff --git a/sqlglot/dialects/dialect.py b/sqlglot/dialects/dialect.py
index 5b12f56c..eca2bf59 100644
--- a/sqlglot/dialects/dialect.py
+++ b/sqlglot/dialects/dialect.py
@@ -411,6 +411,9 @@ class Dialect(metaclass=_Dialect):
is cast to x's type to match it instead.
"""
+ SUPPORTS_VALUES_DEFAULT = True
+ """Whether the DEFAULT keyword is supported in the VALUES clause."""
+
REGEXP_EXTRACT_DEFAULT_GROUP = 0
"""The default value for the capturing group."""
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index ec702a09..056d974c 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -198,6 +198,7 @@ class Presto(Dialect):
TYPED_DIVISION = True
TABLESAMPLE_SIZE_IS_PERCENT = True
LOG_BASE_FIRST: t.Optional[bool] = None
+ SUPPORTS_VALUES_DEFAULT = False
TIME_MAPPING = MySQL.TIME_MAPPING
diff --git a/sqlglot/parser.py b/sqlglot/parser.py
index 2a477d9b..12419e8c 100644
--- a/sqlglot/parser.py
+++ b/sqlglot/parser.py
@@ -2946,8 +2946,13 @@ class Parser(metaclass=_Parser):
)
def _parse_value(self) -> t.Optional[exp.Tuple]:
+ def _parse_value_expression() -> t.Optional[exp.Expression]:
+ if self.dialect.SUPPORTS_VALUES_DEFAULT and self._match(TokenType.DEFAULT):
+ return exp.var(self._prev.text.upper())
+ return self._parse_expression()
+
if self._match(TokenType.L_PAREN):
- expressions = self._parse_csv(self._parse_expression)
+ expressions = self._parse_csv(_parse_value_expression)
self._match_r_paren()
return self.expression(exp.Tuple, expressions=expressions)
| “Default” is a keyword, but it was converted into a string value after translation.
```
Python 3.13.0 | packaged by Anaconda, Inc. | (main, Oct 7 2024, 16:25:56) [Clang 14.0.6 ] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> sqlglot.transpile("insert into t(i) values (default)", read="mysql", write="duckdb")[0]
'INSERT INTO t (i) VALUES ("default")'
```
“Default” is a keyword, but it was converted into a string value after translation. | tobymao/sqlglot | diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index fd6b36f1..9e5b74e0 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -118,6 +118,13 @@ class TestMySQL(Validator):
"CREATE TABLE `foo` (a VARCHAR(10), INDEX idx_a (a DESC))",
)
+ self.validate_all(
+ "insert into t(i) values (default)",
+ write={
+ "duckdb": "INSERT INTO t (i) VALUES (DEFAULT)",
+ "mysql": "INSERT INTO t (i) VALUES (DEFAULT)",
+ },
+ )
self.validate_all(
"CREATE TABLE t (id INT UNSIGNED)",
write={
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 25.32 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@74dc39ba8649fd8292c97c82088b39b08f531702#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_mysql.py::TestMySQL::test_ddl"
] | [] | [
"tests/dialects/test_mysql.py::TestMySQL::test_at_time_zone",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_explain",
"tests/dialects/test_mysql.py::TestMySQL::test_grant",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_json_value",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_timestamp_trunc",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] | [] | MIT License | 20,326 | 511 | [
"sqlglot/dialects/dialect.py",
"sqlglot/dialects/presto.py",
"sqlglot/parser.py"
] |
|
nilearn__nilearn-4802 | b734e7af7f886de41daf19f3d030d89ac6f2c2c7 | 2024-11-27 12:54:55 | c39ca5ec29463a262b635a47aadfb5dfc19cafb6 | Remi-Gau: @tsalo @htwangtw can you have a look at this one maybe?
I know you rely more on pybids for your BIDS things but you may have opinions about this new behavior of get_bids_files.
For context this is necessary because one of our examples has 'BIDS' derivatives that are not BIDS compliant (no space entity) but I figured that it may be generally useful to have a way to fetch files that DO NOT have specific entity in general.
Remi-Gau: > The long term solution would be to fix the imported dataset, right ?
yes that would be good too but the changes from this PR can also be useful outside the narrow "nilearn dev need a hack for poorly named example data" usecase. 😜 | diff --git a/examples/07_advanced/plot_beta_series.py b/examples/07_advanced/plot_beta_series.py
index 00e89bdf8..2adb3e58b 100644
--- a/examples/07_advanced/plot_beta_series.py
+++ b/examples/07_advanced/plot_beta_series.py
@@ -60,9 +60,6 @@ to build the LSS beta series.
"""
# sphinx_gallery_thumbnail_number = -2
-# %%
-from nilearn import image, plotting
-
# %%
# Prepare data and analysis parameters
# ------------------------------------
@@ -70,12 +67,14 @@ from nilearn import image, plotting
# and create a standard :class:`~nilearn.glm.first_level.FirstLevelModel`.
from nilearn.datasets import fetch_language_localizer_demo_dataset
from nilearn.glm.first_level import FirstLevelModel, first_level_from_bids
+from nilearn.plotting import plot_design_matrix, plot_stat_map, show
data = fetch_language_localizer_demo_dataset(legacy_output=False)
models, models_run_imgs, events_dfs, models_confounds = first_level_from_bids(
dataset_path=data.data_dir,
task_label="languagelocalizer",
+ space_label="",
sub_labels=["01"],
img_filters=[("desc", "preproc")],
n_jobs=2,
@@ -110,8 +109,8 @@ standard_glm.fit(fmri_file, events_df)
# The standard design matrix has one column for each condition, along with
# columns for the confound regressors and drifts
fig, ax = plt.subplots(figsize=(5, 10))
-plotting.plot_design_matrix(standard_glm.design_matrices_[0], axes=ax)
-fig.show()
+plot_design_matrix(standard_glm.design_matrices_[0], axes=ax)
+show()
# %%
# Define the LSA model
@@ -140,13 +139,14 @@ lsa_glm = FirstLevelModel(**glm_parameters)
lsa_glm.fit(fmri_file, lsa_events_df)
fig, ax = plt.subplots(figsize=(10, 10))
-plotting.plot_design_matrix(lsa_glm.design_matrices_[0], axes=ax)
-fig.show()
+plot_design_matrix(lsa_glm.design_matrices_[0], axes=ax)
+show()
# %%
# Aggregate beta maps from the LSA model based on condition
# `````````````````````````````````````````````````````````
# Collect the :term:`Parameter Estimate` maps
+from nilearn.image import concat_imgs
lsa_beta_maps = {cond: [] for cond in events_df["trial_type"].unique()}
trialwise_conditions = lsa_events_df["trial_type"].unique()
@@ -159,7 +159,7 @@ for condition in trialwise_conditions:
# We can concatenate the lists of 3D maps into a single 4D beta series for
# each condition, if we want
lsa_beta_maps = {
- name: image.concat_imgs(maps) for name, maps in lsa_beta_maps.items()
+ name: concat_imgs(maps) for name, maps in lsa_beta_maps.items()
}
# %%
@@ -241,7 +241,7 @@ for i_trial in range(events_df.shape[0]):
# We can concatenate the lists of 3D maps into a single 4D beta series for
# each condition, if we want
lss_beta_maps = {
- name: image.concat_imgs(maps) for name, maps in lss_beta_maps.items()
+ name: concat_imgs(maps) for name, maps in lss_beta_maps.items()
}
# %%
@@ -249,13 +249,13 @@ lss_beta_maps = {
# `````````````````````````````````````````````````
fig, axes = plt.subplots(ncols=3, figsize=(20, 10))
for i_trial in range(3):
- plotting.plot_design_matrix(
+ plot_design_matrix(
lss_design_matrices[i_trial],
axes=axes[i_trial],
)
axes[i_trial].set_title(f"Trial {i_trial + 1}")
-fig.show()
+show()
# %%
# Compare the three modeling approaches
@@ -276,10 +276,10 @@ fig, axes = plt.subplots(
)
for i_ax, _ in enumerate(axes):
- plotting.plot_design_matrix(DESIGN_MATRICES[i_ax], axes=axes[i_ax])
+ plot_design_matrix(DESIGN_MATRICES[i_ax], axes=axes[i_ax])
axes[i_ax].set_title(DM_TITLES[i_ax])
-fig.show()
+show()
# %%
# Applications of beta series
@@ -365,7 +365,7 @@ string_connectivity_img = brain_masker.inverse_transform(string_corrs.T)
# Show both correlation maps
fig, axes = plt.subplots(figsize=(10, 8), nrows=2)
-display = plotting.plot_stat_map(
+display = plot_stat_map(
language_connectivity_img,
threshold=0.5,
vmax=1,
@@ -373,6 +373,7 @@ display = plotting.plot_stat_map(
title="Language",
figure=fig,
axes=axes[0],
+ cmap="bwr",
)
display.add_markers(
marker_coords=coords,
@@ -380,7 +381,7 @@ display.add_markers(
marker_size=300,
)
-display = plotting.plot_stat_map(
+display = plot_stat_map(
string_connectivity_img,
threshold=0.5,
vmax=1,
@@ -388,6 +389,7 @@ display = plotting.plot_stat_map(
title="String",
figure=fig,
axes=axes[1],
+ cmap="bwr",
)
display.add_markers(
marker_coords=coords,
@@ -396,7 +398,7 @@ display.add_markers(
)
fig.suptitle("LSS Beta Series Functional Connectivity")
-fig.show()
+show()
# %%
# References
diff --git a/examples/07_advanced/plot_bids_analysis.py b/examples/07_advanced/plot_bids_analysis.py
index 663946c80..a650a5e75 100644
--- a/examples/07_advanced/plot_bids_analysis.py
+++ b/examples/07_advanced/plot_bids_analysis.py
@@ -17,6 +17,11 @@ More specifically:
3. Fit a second level model on the fitted first level models.
Notice that in this case the preprocessed :term:`bold<BOLD>`
images were already normalized to the same :term:`MNI` space.
+
+.. note::
+
+ We are only using a subset of participants from the dataset
+ to lower the run time of the example.
"""
from nilearn import plotting
@@ -59,7 +64,12 @@ task_label = "languagelocalizer"
models_events,
models_confounds,
) = first_level_from_bids(
- data.data_dir, task_label, img_filters=[("desc", "preproc")], n_jobs=2
+ data.data_dir,
+ task_label,
+ img_filters=[("desc", "preproc")],
+ n_jobs=2,
+ space_label="",
+ sub_labels=["01", "02", "03", "04"], # comment to run all subjects
)
# %%
@@ -103,9 +113,16 @@ p001_unc = norm.isf(0.001)
# %%
# Prepare figure for concurrent plot of individual maps.
+from math import ceil
+
import matplotlib.pyplot as plt
+import numpy as np
+
+ncols = 3
+nrows = ceil(len(models) / ncols)
-fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(8, 4.5))
+fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(8, 4.5))
+axes = np.atleast_2d(axes)
model_and_args = zip(models, models_run_imgs, models_events, models_confounds)
for midx, (model, imgs, events, confounds) in enumerate(model_and_args):
# fit the GLM
@@ -117,9 +134,10 @@ for midx, (model, imgs, events, confounds) in enumerate(model_and_args):
colorbar=False,
threshold=p001_unc,
title=f"sub-{model.subject_label}",
- axes=axes[int(midx / 5), int(midx % 5)],
+ axes=axes[int(midx / ncols), int(midx % ncols)],
plot_abs=False,
display_mode="x",
+ cmap="bwr",
)
fig.suptitle("subjects z_map language network (unc p<0.001)")
plotting.show()
@@ -160,5 +178,6 @@ plotting.plot_glass_brain(
plot_abs=False,
display_mode="x",
figure=plt.figure(figsize=(5, 4)),
+ cmap="bwr",
)
plotting.show()
diff --git a/examples/07_advanced/plot_surface_bids_analysis.py b/examples/07_advanced/plot_surface_bids_analysis.py
index 7b57f041b..62fa5a69a 100644
--- a/examples/07_advanced/plot_surface_bids_analysis.py
+++ b/examples/07_advanced/plot_surface_bids_analysis.py
@@ -59,6 +59,7 @@ task_label = "languagelocalizer"
models, run_imgs, events, confounds = first_level_from_bids(
data.data_dir,
task_label,
+ space_label="",
img_filters=[("desc", "preproc")],
hrf_model="glover + derivative",
n_jobs=2,
diff --git a/nilearn/glm/first_level/first_level.py b/nilearn/glm/first_level/first_level.py
index a84a5684e..1e517e27c 100644
--- a/nilearn/glm/first_level/first_level.py
+++ b/nilearn/glm/first_level/first_level.py
@@ -1792,7 +1792,9 @@ def _get_processed_imgs(
verbose=verbose,
)
- if space_label is not None and space_label not in ("fsaverage5"):
+ if space_label is not None and (
+ space_label == "" or space_label not in ("fsaverage5")
+ ):
imgs = get_bids_files(
main_path=derivatives_path,
modality_folder="func",
diff --git a/nilearn/interfaces/bids/query.py b/nilearn/interfaces/bids/query.py
index e6fa3f8ab..2ed53234e 100644
--- a/nilearn/interfaces/bids/query.py
+++ b/nilearn/interfaces/bids/query.py
@@ -252,7 +252,8 @@ def get_bids_files(
files = [
file_
for file_ in files
- if (key in file_ and file_[key] == value)
+ if (key not in file_ and value == "")
+ or (key in file_ and file_[key] == value)
]
return [ref_file["file_path"] for ref_file in files]
| Localizer BIDS dataset derivative not BIDS compliant
This caused some examples to fail because our the BIDS derivatives files in our example dataset are not proper derivatives: they do not have a space entity.
- examples/07_advanced/plot_beta_series.py
- examples/07_advanced/plot_bids_analysis.py
see for example: https://github.com/nilearn/nilearn/actions/runs/12038962141/job/33569168884#step:13:27052
Will fix in separate PR.
_Originally posted by @Remi-Gau in https://github.com/nilearn/nilearn/issues/4507#issuecomment-2503775909_
| nilearn/nilearn | diff --git a/nilearn/interfaces/tests/test_bids.py b/nilearn/interfaces/tests/test_bids.py
index 17aa1223b..0a91c071a 100644
--- a/nilearn/interfaces/tests/test_bids.py
+++ b/nilearn/interfaces/tests/test_bids.py
@@ -294,6 +294,54 @@ def test_get_bids_files(tmp_path):
assert len(selection) == 12 * n_sub
+def test_get_bids_files_no_space_entity(tmp_path):
+ """Pass empty string for a label ignores files containing that label.
+
+ - remove space entity only from subject 01
+ - check that only files from the appropriate subject are returned
+ when passing ("space", "T1w") or ("space", "")
+ """
+ n_sub = 2
+
+ bids_path = create_fake_bids_dataset(
+ base_dir=tmp_path,
+ n_sub=n_sub,
+ n_ses=2,
+ tasks=["main"],
+ n_runs=[2],
+ )
+
+ for file in (bids_path / "derivatives" / "sub-01").glob(
+ "**/*_space-*.nii.gz"
+ ):
+ stem = [
+ entity
+ for entity in file.stem.split("_")
+ if not entity.startswith("space")
+ ]
+ file.replace(file.with_stem("_".join(stem)))
+
+ selection = get_bids_files(
+ bids_path / "derivatives",
+ file_tag="bold",
+ file_type="nii.gz",
+ filters=[("space", "T1w")],
+ )
+
+ assert selection
+ assert not any("sub-01" in file for file in selection)
+
+ selection = get_bids_files(
+ bids_path / "derivatives",
+ file_tag="bold",
+ file_type="nii.gz",
+ filters=[("space", "")],
+ )
+
+ assert selection
+ assert not any("sub-02" in file for file in selection)
+
+
def test_parse_bids_filename():
fields = ["sub", "ses", "task", "lolo"]
labels = ["01", "01", "langloc", "lala"]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 5
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-randomly",
"pytest-reporter-html1",
"pytest-xdist"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
ansi2html==1.9.2
babel==2.17.0
beautifulsoup4==4.13.3
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
fonttools==4.56.0
furo==2024.8.6
htmlmin2==0.1.13
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
joblib==1.4.2
kaleido==0.2.1
kiwisolver==1.4.7
latexcodec==3.0.0
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdit-py-plugins==0.4.2
mdurl==0.1.2
memory-profiler==0.61.0
myst-parser==3.0.1
narwhals==1.32.0
nibabel==5.3.2
-e git+https://github.com/nilearn/nilearn.git@b734e7af7f886de41daf19f3d030d89ac6f2c2c7#egg=nilearn
numpy==2.0.2
numpydoc==1.8.0
packaging==24.2
pandas==2.2.3
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
psutil==7.0.0
pybtex==0.24.0
pybtex-docutils==1.0.3
Pygments==2.19.1
pyparsing==3.2.3
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-randomly==3.16.0
pytest-reporter==0.5.3
pytest-reporter-html1==0.9.2
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-basic-ng==1.0.0b2
sphinx-copybutton==0.5.2
sphinx-gallery==0.19.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-bibtex==2.6.3
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sphinxext-opengraph==0.9.1
tabulate==0.9.0
threadpoolctl==3.6.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: nilearn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- ansi2html==1.9.2
- babel==2.17.0
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- fonttools==4.56.0
- furo==2024.8.6
- htmlmin2==0.1.13
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- joblib==1.4.2
- kaleido==0.2.1
- kiwisolver==1.4.7
- latexcodec==3.0.0
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- memory-profiler==0.61.0
- myst-parser==3.0.1
- narwhals==1.32.0
- nibabel==5.3.2
- nilearn==0.10.5.dev275+gb734e7af7
- numpy==2.0.2
- numpydoc==1.8.0
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- psutil==7.0.0
- pybtex==0.24.0
- pybtex-docutils==1.0.3
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-randomly==3.16.0
- pytest-reporter==0.5.3
- pytest-reporter-html1==0.9.2
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-basic-ng==1.0.0b2
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-gallery==0.19.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-bibtex==2.6.3
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sphinxext-opengraph==0.9.1
- tabulate==0.9.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/nilearn
| [
"nilearn/interfaces/tests/test_bids.py::test_get_bids_files_no_space_entity"
] | [] | [
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_contrast_definitions[contrasts0-1]",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids[sub-01_ses-01_task-nback]",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids[sub-01_task-nback]",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_contrast_definitions[contrasts0-sub-01_task-nback_]",
"nilearn/interfaces/tests/test_bids.py::test_infer_repetition_time_from_dataset",
"nilearn/interfaces/tests/test_bids.py::test_get_metadata_from_bids",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_serialize_affine",
"nilearn/interfaces/tests/test_bids.py::test_parse_bids_filename",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_second_level[task-nback]",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_contrast_definitions[AAA",
"nilearn/interfaces/tests/test_bids.py::test_get_bids_files_inheritance_principle_root_folder",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_contrast_definitions[contrasts0-sub-01_ses-01_task-nback]",
"nilearn/interfaces/tests/test_bids.py::test_get_bids_files",
"nilearn/interfaces/tests/test_bids.py::test_infer_slice_timing_start_time_from_dataset",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids[task-nback]",
"nilearn/interfaces/tests/test_bids.py::test_save_glm_to_bids_errors"
] | [] | New BSD License | 20,337 | 2,612 | [
"examples/07_advanced/plot_beta_series.py",
"examples/07_advanced/plot_bids_analysis.py",
"examples/07_advanced/plot_surface_bids_analysis.py",
"nilearn/glm/first_level/first_level.py",
"nilearn/interfaces/bids/query.py"
] |
reframe-hpc__reframe-3328 | ada1dfcc52075f586a2e27f1198dfc1c493de5cc | 2024-11-27 18:08:01 | 925be11f6c01c0f035f9bc8ce9ac5ce3daff2049 | diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py
index f55bf989..2337546b 100644
--- a/reframe/frontend/cli.py
+++ b/reframe/frontend/cli.py
@@ -1638,9 +1638,12 @@ def main():
if options.max_retries and runner.stats.failed(run=0):
printer.retry_report(report)
- # Print a failure report if we had failures in the last run
+ # Print a failure report in case of failures.
+ # If `--duration` or `--reruns` is used then take into account
+ # all runs, else (i.e., `--max-retries`) only the last run.
success = True
- if runner.stats.failed():
+ runid = None if options.duration or options.reruns else -1
+ if runner.stats.failed(run=runid):
success = False
printer.failure_report(
report,
@@ -1734,6 +1737,12 @@ def main():
sys.exit(1)
sys.exit(0)
+ except errors.RunSessionTimeout as err:
+ printer.warning(f'run session stopped: {err}')
+ if not success:
+ sys.exit(1)
+ else:
+ sys.exit(0)
except (Exception, KeyboardInterrupt, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
| When running with `--duration` do not exit with a non-zero exit code just because of the time out of the last run
Also, we should document what is the expected exit code in case of reruns, either through the `--duration` or the `--reruns` option. For example, for `--reruns` it seems that reframe exits with zero if the last rerun was successful, despite failures in previous reruns. | reframe-hpc/reframe | diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index 1e176359..799b45f9 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -1076,6 +1076,13 @@ def test_reruns_with_duration(run_reframe):
assert returncode == 1
+def test_exitcode_timeout(run_reframe):
+ assert_no_crash(*run_reframe(
+ more_options=['--duration=5s', '-n^HelloTest'],
+ checkpath=['unittests/resources/checks/hellocheck.py']
+ ))
+
+
@pytest.fixture(params=['name', 'rname', 'uid', 'ruid', 'random'])
def exec_order(request):
return request.param
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 4.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc make"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | archspec==0.2.5
argcomplete==3.6.1
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
filelock==3.18.0
idna==3.10
iniconfig==2.1.0
jsonschema==3.2.0
lxml==5.3.0
packaging==24.2
pluggy==1.5.0
pyrsistent==0.20.0
pytest==8.3.5
pytest-parallel==0.1.1
PyYAML==6.0.2
-e git+https://github.com/reframe-hpc/reframe.git@ada1dfcc52075f586a2e27f1198dfc1c493de5cc#egg=ReFrame_HPC
requests==2.32.3
semver==3.0.4
six==1.17.0
tabulate==0.9.0
tblib==3.0.0
tomli==2.2.1
urllib3==2.3.0
wcwidth==0.2.13
| name: reframe
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- archspec==0.2.5
- argcomplete==3.6.1
- attrs==25.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- idna==3.10
- iniconfig==2.1.0
- jsonschema==3.2.0
- lxml==5.3.0
- packaging==24.2
- pluggy==1.5.0
- pyrsistent==0.20.0
- pytest==8.3.5
- pytest-parallel==0.1.1
- pyyaml==6.0.2
- reframe-hpc==4.7.0
- requests==2.32.3
- semver==3.0.4
- six==1.17.0
- tabulate==0.9.0
- tblib==3.0.0
- tomli==2.2.1
- urllib3==2.3.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/reframe
| [
"unittests/test_cli.py::test_exitcode_timeout"
] | [] | [
"unittests/test_cli.py::test_check_success[run]",
"unittests/test_cli.py::test_check_success[dry_run]",
"unittests/test_cli.py::test_check_restore_session_failed",
"unittests/test_cli.py::test_check_restore_session_succeeded_test",
"unittests/test_cli.py::test_check_restore_session_check_search_path",
"unittests/test_cli.py::test_check_success_force_local[run]",
"unittests/test_cli.py::test_check_success_force_local[dry_run]",
"unittests/test_cli.py::test_report_file_with_sessionid[run]",
"unittests/test_cli.py::test_report_file_with_sessionid[dry_run]",
"unittests/test_cli.py::test_report_ends_with_newline[run]",
"unittests/test_cli.py::test_report_ends_with_newline[dry_run]",
"unittests/test_cli.py::test_report_file_symlink_latest[run]",
"unittests/test_cli.py::test_report_file_symlink_latest[dry_run]",
"unittests/test_cli.py::test_check_failure",
"unittests/test_cli.py::test_check_setup_failure",
"unittests/test_cli.py::test_check_kbd_interrupt",
"unittests/test_cli.py::test_check_sanity_failure[run]",
"unittests/test_cli.py::test_check_sanity_failure[dry_run]",
"unittests/test_cli.py::test_dont_restage",
"unittests/test_cli.py::test_checkpath_symlink",
"unittests/test_cli.py::test_performance_check_failure[run]",
"unittests/test_cli.py::test_performance_check_failure[dry_run]",
"unittests/test_cli.py::test_perflogdir_from_env",
"unittests/test_cli.py::test_performance_report[run-storage=yes]",
"unittests/test_cli.py::test_performance_report[run-storage=no]",
"unittests/test_cli.py::test_performance_report[dry_run-storage=yes]",
"unittests/test_cli.py::test_performance_report[dry_run-storage=no]",
"unittests/test_cli.py::test_skip_system_check_option[run]",
"unittests/test_cli.py::test_skip_system_check_option[dry_run]",
"unittests/test_cli.py::test_skip_prgenv_check_option[run]",
"unittests/test_cli.py::test_skip_prgenv_check_option[dry_run]",
"unittests/test_cli.py::test_sanity_of_checks",
"unittests/test_cli.py::test_unknown_system",
"unittests/test_cli.py::test_sanity_of_optconfig",
"unittests/test_cli.py::test_checkpath_recursion",
"unittests/test_cli.py::test_same_output_stage_dir",
"unittests/test_cli.py::test_execution_modes[run]",
"unittests/test_cli.py::test_execution_modes[dry_run]",
"unittests/test_cli.py::test_invalid_mode_error",
"unittests/test_cli.py::test_timestamp_option",
"unittests/test_cli.py::test_timestamp_option_default",
"unittests/test_cli.py::test_list_empty_prgenvs_check_and_options",
"unittests/test_cli.py::test_list_check_with_empty_prgenvs",
"unittests/test_cli.py::test_list_empty_prgenvs_in_check_and_options",
"unittests/test_cli.py::test_list_with_details",
"unittests/test_cli.py::test_list_concretized",
"unittests/test_cli.py::test_list_tags",
"unittests/test_cli.py::test_list_tests_with_deps",
"unittests/test_cli.py::test_list_tests_with_fixtures",
"unittests/test_cli.py::test_filtering_multiple_criteria_name",
"unittests/test_cli.py::test_filtering_multiple_criteria_hash",
"unittests/test_cli.py::test_filtering_exclude_hash",
"unittests/test_cli.py::test_filtering_cpu_only",
"unittests/test_cli.py::test_filtering_gpu_only",
"unittests/test_cli.py::test_filtering_by_expr",
"unittests/test_cli.py::test_show_config_all",
"unittests/test_cli.py::test_show_config_param",
"unittests/test_cli.py::test_show_config_unknown_param",
"unittests/test_cli.py::test_show_config_null_param",
"unittests/test_cli.py::test_verbosity",
"unittests/test_cli.py::test_verbosity_with_check",
"unittests/test_cli.py::test_quiesce_with_check",
"unittests/test_cli.py::test_failure_stats[run]",
"unittests/test_cli.py::test_failure_stats[dry_run]",
"unittests/test_cli.py::test_maxfail_option",
"unittests/test_cli.py::test_maxfail_invalid_option",
"unittests/test_cli.py::test_maxfail_negative",
"unittests/test_cli.py::test_distribute_build_remotely_runonly[run]",
"unittests/test_cli.py::test_distribute_build_remotely_runonly[dry_run]",
"unittests/test_cli.py::test_repeat_option[run]",
"unittests/test_cli.py::test_repeat_option[dry_run]",
"unittests/test_cli.py::test_repeat_invalid_option",
"unittests/test_cli.py::test_repeat_negative",
"unittests/test_cli.py::test_parameterize_tests",
"unittests/test_cli.py::test_parameterize_tests_invalid_params",
"unittests/test_cli.py::test_reruns_negative",
"unittests/test_cli.py::test_reruns_with_duration",
"unittests/test_cli.py::test_exec_order[name]",
"unittests/test_cli.py::test_exec_order[rname]",
"unittests/test_cli.py::test_exec_order[uid]",
"unittests/test_cli.py::test_exec_order[ruid]",
"unittests/test_cli.py::test_exec_order[random]",
"unittests/test_cli.py::test_detect_host_topology",
"unittests/test_cli.py::test_detect_host_topology_file",
"unittests/test_cli.py::test_external_vars[run]",
"unittests/test_cli.py::test_external_vars[dry_run]",
"unittests/test_cli.py::test_external_vars_invalid_expr[run]",
"unittests/test_cli.py::test_external_vars_invalid_expr[dry_run]",
"unittests/test_cli.py::test_fixture_registry_env_sys",
"unittests/test_cli.py::test_fixture_resolution[run]",
"unittests/test_cli.py::test_fixture_resolution[dry_run]",
"unittests/test_cli.py::test_dynamic_tests[run]",
"unittests/test_cli.py::test_dynamic_tests[dry_run]",
"unittests/test_cli.py::test_dynamic_tests_filtering[run]",
"unittests/test_cli.py::test_dynamic_tests_filtering[dry_run]",
"unittests/test_cli.py::test_testlib_inherit_fixture_in_different_files",
"unittests/test_cli.py::test_storage_options[csv]",
"unittests/test_cli.py::test_storage_options[plain]",
"unittests/test_cli.py::test_storage_options[pretty]",
"unittests/test_cli.py::test_disabled_results_storage[--delete-stored-sessions=now-1d:now]",
"unittests/test_cli.py::test_disabled_results_storage[--describe-stored-sessions=now-1d:now]",
"unittests/test_cli.py::test_disabled_results_storage[--describe-stored-testcases=now-1d:now]",
"unittests/test_cli.py::test_disabled_results_storage[--list-stored-sessions]",
"unittests/test_cli.py::test_disabled_results_storage[--list-stored-testcases=now-1d:now/mean:/]",
"unittests/test_cli.py::test_disabled_results_storage[--performance-compare=now-1d:now/now-1d/mean:/]",
"unittests/test_cli.py::test_session_annotations",
"unittests/test_cli.py::test_performance_compare[csv]",
"unittests/test_cli.py::test_performance_compare[plain]",
"unittests/test_cli.py::test_performance_compare[pretty]"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,343 | 337 | [
"reframe/frontend/cli.py"
] |
|
radiocosmology__alpenhorn-260 | dd9cfffff57cd83f7dc059e20ed3280c60ca0bba | 2024-11-28 05:16:00 | dd9cfffff57cd83f7dc059e20ed3280c60ca0bba | ketiltrout: I've relaised there's a problem with this that I need to fix...
ketiltrout: OK, that wasn't as difficult as I thought it would be. The issue in my original PR was there needs to be different checks for non-recursive and recursive `ArchiveFileImportRequest`s, which I have now implemented. | diff --git a/alpenhorn/cli/acq/create.py b/alpenhorn/cli/acq/create.py
index 7e49b77..87efb92 100644
--- a/alpenhorn/cli/acq/create.py
+++ b/alpenhorn/cli/acq/create.py
@@ -3,6 +3,7 @@
import click
import peewee as pw
+from ...common.util import invalid_import_path
from ...db import database_proxy, ArchiveAcq
from ..options import cli_option
from ..cli import echo
@@ -17,6 +18,11 @@ def create(name):
another existing Acquisition.
"""
+ # Validate
+ rejection_reason = invalid_import_path(name)
+ if rejection_reason:
+ raise click.ClickException(f"invalid name: {rejection_reason}")
+
with database_proxy.atomic():
try:
ArchiveAcq.get(name=name)
diff --git a/alpenhorn/cli/file/create.py b/alpenhorn/cli/file/create.py
index d091c76..a54ee64 100644
--- a/alpenhorn/cli/file/create.py
+++ b/alpenhorn/cli/file/create.py
@@ -4,7 +4,7 @@ import click
import pathlib
import peewee as pw
-from ...common.util import md5sum_file
+from ...common.util import md5sum_file, invalid_import_path
from ...db import (
ArchiveAcq,
ArchiveFile,
@@ -91,6 +91,11 @@ def create(name, acq_name, from_file, md5, prefix, size):
if size is not None and size < 0:
raise click.ClickException(f"negative file size.")
+ # Validate
+ rejection_reason = invalid_import_path(name)
+ if rejection_reason:
+ raise click.ClickException(f"invalid name: {rejection_reason}")
+
validate_md5(md5)
# Scan a file, if requested
diff --git a/alpenhorn/common/util.py b/alpenhorn/common/util.py
index 905b752..119e9f5 100644
--- a/alpenhorn/common/util.py
+++ b/alpenhorn/common/util.py
@@ -313,3 +313,47 @@ def pretty_deltat(seconds: float) -> str:
# For short durations, include tenths of a second
return f"{seconds:.1f}s"
+
+
+def invalid_import_path(name: str) -> str | None:
+ """Is `name` invalid as an import path?
+
+ i.e., can `name` be used as an ArchiveAcq or
+ ArchiveFile name (or both, combined).
+
+ Returns
+ -------
+ rejection_reason: str or None
+ A string describing why `name` was rejected,
+ or None, if the name was valid.
+ """
+
+ # Can't be the null string
+ if name == "":
+ return "empty path"
+
+ # Can't simply be "." or ".."
+ if name == "." or name == "..":
+ return "invalid path"
+
+ # Can't start with "/" or "./" or "../"
+ if name.startswith("/") or name.startswith("./") or name.startswith("../"):
+ return "invalid start"
+
+ # Can't end with "/" or "/." or "/.."
+ if name.endswith("/") or name.endswith("/.") or name.endswith("/.."):
+ return "invalid end"
+
+ # Can't have multiple "/" in a row
+ if "//" in name:
+ return "repeated /"
+
+ # Can't have internal "/./"
+ if "/./" in name:
+ return 'invalid path element "."'
+
+ # Can't have internal "/../"
+ if "/../" in name:
+ return 'invalid path element ".."'
+
+ return None
diff --git a/alpenhorn/daemon/auto_import.py b/alpenhorn/daemon/auto_import.py
index a25852d..2241324 100644
--- a/alpenhorn/daemon/auto_import.py
+++ b/alpenhorn/daemon/auto_import.py
@@ -15,6 +15,7 @@ from watchdog.events import FileSystemEventHandler
from .. import db
from ..common import config, extensions
+from ..common.util import invalid_import_path
from ..db import (
ArchiveAcq,
ArchiveFile,
@@ -173,6 +174,15 @@ def _import_file(
req.complete()
return
+ # Vet acq_name from extension
+ rejection_reason = invalid_import_path(str(acq_name))
+ if rejection_reason:
+ log.warning(f'Rejecting invalid acq path "{acq_name}": {rejection_reason}')
+ if req:
+ log.info(f"Completed import request #{req.id}.")
+ req.complete()
+ return
+
file_name = path.relative_to(acq_name)
# If a copy already exists, we're done
diff --git a/alpenhorn/daemon/update.py b/alpenhorn/daemon/update.py
index 08c2aed..9d71d6b 100644
--- a/alpenhorn/daemon/update.py
+++ b/alpenhorn/daemon/update.py
@@ -491,7 +491,29 @@ class UpdateableNode(updateable_base):
continue
if req.recurse:
- # If recursion was requested, run a scan on the path
+ # Ensure the base directory we want to scan is in-tree
+ fullpath = pathlib.Path(self.db.root, path)
+ try:
+ fullpath = fullpath.resolve(strict=True)
+ except OSError as e:
+ log.warning(
+ f"Ignoring import request of unresolvable scan path: {fullpath}"
+ )
+ req.complete()
+ continue
+
+ # Recompute the relative path after resolution, or skip scan if we're
+ # now out-of-tree
+ try:
+ path = fullpath.relative_to(self.db.root)
+ except ValueError:
+ log.warning(
+ f"Ignoring import request of out-of-tree scan path: {fullpath}"
+ )
+ req.complete()
+ continue
+
+ # Run scan
Task(
func=auto_import.scan,
queue=self._queue,
@@ -500,7 +522,17 @@ class UpdateableNode(updateable_base):
name=f'Scan "{path}" on {self.name}',
)
else:
- # Otherwise, try to directly import the path
+ # Check that the import path is valid
+ rejection_reason = util.invalid_import_path(req.path)
+ if rejection_reason:
+ log.warning(
+ f'Ignoring request for import of invalid path "{req.path}": '
+ + rejection_reason
+ )
+ req.complete()
+ continue
+
+ # Try to directly import the path
auto_import.import_file(self, self._queue, req.path, req.register, req)
def update(self) -> None:
| Vetting acq and file names
With the CLI commands "acq create" and "file create" taking arbitrary strings that end up being used for acq or file names, there's going to need to be some vetting of the names.
The daemon implicitly sets some limits on the names to make them sanitary, by extracting them from a path to a file on a Node, but the CLI doesn't have that, so it should at least prohibit:
* starting or ending with `/`
* two or more `/` in a row (`/////`)
* `/./` or `/../` path elements
Anything else? We definitely need to prohibit acq and file names from escaping from `node.root`. Though I'm more fine with names which simply produce unaccessible paths (since those are fairly harmless, outside of the acq/file they're associated with being unaccessible to the daemon).
If the combination `acqname/filename` formed a concrete path we could use standard path normalisation, but they don't (becasue the CLI can be run somewhere without any data present), so we need to do something else.
The daemon always forms a path like: `node_root/acq_name/file_name` so leading/trailing slashes can't force it out-of-tree, but internal `/../` might.
We _do_ need to protect against having both an acq named `"acq"` and one named, say, `"acq/"`, since those would end up referring to the same files on disk. | radiocosmology/alpenhorn | diff --git a/tests/cli/acq/test_create.py b/tests/cli/acq/test_create.py
index 3794d59..ad50a09 100644
--- a/tests/cli/acq/test_create.py
+++ b/tests/cli/acq/test_create.py
@@ -14,6 +14,15 @@ def test_create(clidb, cli):
ArchiveAcq.get(name="TEST")
+def test_bad_name(clidb, cli):
+ """Test rejection of invalid name."""
+
+ cli(1, ["acq", "create", "/test/"])
+
+ # No acq was created
+ assert ArchiveAcq.select().count() == 0
+
+
def test_create_existing(clidb, cli):
"""Test creating an acq that already exists."""
diff --git a/tests/cli/file/test_create.py b/tests/cli/file/test_create.py
index cc179bf..e625764 100644
--- a/tests/cli/file/test_create.py
+++ b/tests/cli/file/test_create.py
@@ -16,6 +16,24 @@ def test_no_data(clidb, cli):
cli(2, ["file", "create", "name", "Acq", "--size=3"])
+def test_bad_name(clidb, cli):
+ """Test an invalid file name."""
+
+ ArchiveAcq.create(name="Acq")
+
+ cli(
+ 1,
+ [
+ "file",
+ "create",
+ "name/../name",
+ "Acq",
+ "--md5=0123456789ABCDEF0123456789ABCDEF",
+ "--size=3",
+ ],
+ )
+
+
def test_negative_size(clidb, cli):
"""Test a negative size."""
diff --git a/tests/common/test_util.py b/tests/common/test_util.py
index 77f3817..697ad62 100644
--- a/tests/common/test_util.py
+++ b/tests/common/test_util.py
@@ -127,3 +127,33 @@ def test_pretty_deltat():
assert util.pretty_deltat(0.01) == "0.0s"
assert util.pretty_deltat(0) == "0.0s"
assert util.pretty_deltat(-1) == "-1.0s"
+
+
+def test_invalid_import_path():
+ """Test invalid_import_path"""
+
+ # Explicitly forbidden names
+ assert util.invalid_import_path("") is not None
+ assert util.invalid_import_path(".") is not None
+ assert util.invalid_import_path("..") is not None
+
+ # Forbidden starts
+ assert util.invalid_import_path("/name") is not None
+ assert util.invalid_import_path("./name") is not None
+ assert util.invalid_import_path("../name") is not None
+
+ # Forbidden ends
+ assert util.invalid_import_path("name/") is not None
+ assert util.invalid_import_path("name/.") is not None
+ assert util.invalid_import_path("name/..") is not None
+
+ # Forbidden middles
+ assert util.invalid_import_path("name//name") is not None
+ assert util.invalid_import_path("name///name") is not None
+ assert util.invalid_import_path("name/./name") is not None
+ assert util.invalid_import_path("name/../name") is not None
+
+ # These are fine
+ assert util.invalid_import_path("name") is None
+ assert util.invalid_import_path("name/name") is None
+ assert util.invalid_import_path("name/.../name") is None
diff --git a/tests/daemon/test_auto_import.py b/tests/daemon/test_auto_import.py
index 9e2a158..676e9d0 100644
--- a/tests/daemon/test_auto_import.py
+++ b/tests/daemon/test_auto_import.py
@@ -104,6 +104,24 @@ def test_import_file_no_detect(dbtables, unode):
ArchiveAcq.get(name="acq")
+def test_import_file_invalid_acqname(dbtables, unode):
+ """Test invalid acq_name from import_detect."""
+
+ with patch(
+ "alpenhorn.common.extensions._id_ext", [lambda path, node: ("acq/", None)]
+ ):
+ with pytest.raises(StopIteration):
+ next(
+ auto_import._import_file(
+ None, unode, pathlib.PurePath("acq/file"), True, None
+ )
+ )
+
+ # No acq has been added
+ with pytest.raises(pw.DoesNotExist):
+ ArchiveAcq.get(name="acq")
+
+
def test_import_file_locked(xfs, dbtables, unode):
"""Test bad file in _import_file()"""
diff --git a/tests/daemon/test_update_node.py b/tests/daemon/test_update_node.py
index 7c0704d..2cd3db1 100644
--- a/tests/daemon/test_update_node.py
+++ b/tests/daemon/test_update_node.py
@@ -514,6 +514,52 @@ def test_update_import_absolute(unode, queue, archivefileimportrequest):
assert ArchiveFileCopy.select().count() == 0
+def test_update_import_invalid_path(unode, queue, archivefileimportrequest):
+ """update_import() should ignore invalid paths."""
+
+ afir = archivefileimportrequest(path="./path/..", node=unode.db)
+
+ unode.update_import()
+
+ # Nothing queued
+ assert queue.qsize == 0
+
+ # Request is completed
+ assert ArchiveFileImportRequest.get(id=afir.id).completed == 1
+
+
+def test_update_import_outtree_scan(unode, xfs, queue, archivefileimportrequest):
+ """Test rejection of out-of-tree scan paths from import requests"""
+
+ xfs.create_dir("/node/import")
+
+ afir = archivefileimportrequest(path="..", node=unode.db, recurse=True)
+
+ unode.update_import()
+
+ # Nothing queued
+ assert queue.qsize == 0
+
+ # Request is completed
+ assert ArchiveFileImportRequest.get(id=afir.id).completed == 1
+
+
+def test_update_import_unresolve_scan(unode, xfs, queue, archivefileimportrequest):
+ """Test rejection of unresolvable scan paths from import requests"""
+
+ xfs.create_dir("/node/import")
+
+ afir = archivefileimportrequest(path="path/subpath/..", node=unode.db, recurse=True)
+
+ unode.update_import()
+
+ # Nothing queued
+ assert queue.qsize == 0
+
+ # Request is completed
+ assert ArchiveFileImportRequest.get(id=afir.id).completed == 1
+
+
def test_update_import_no_recurse(unode, queue, archivefileimportrequest):
"""Test update_import() with a non-recursive import request."""
@@ -532,9 +578,11 @@ def test_update_import_no_recurse(unode, queue, archivefileimportrequest):
mock.assert_called_with(unode, queue, "import/path", True, afir)
-def test_update_import_scan(unode, queue, archivefileimportrequest):
+def test_update_import_scan(unode, xfs, queue, archivefileimportrequest):
"""Test update_import() with a recursive import request."""
+ xfs.create_dir("/node/import/path")
+
afir = archivefileimportrequest(
path="import/path", node=unode.db, recurse=True, register=True
)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 5
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.11",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/radiocosmology/alpenhorn.git@dd9cfffff57cd83f7dc059e20ed3280c60ca0bba#egg=alpenhorn
bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chimedb @ git+https://github.com/chime-experiment/chimedb.git@d82f48eb0599393723e7ee5d756aff6c6830db32
click==8.1.8
concurrent-log-handler==0.9.25
cryptography==44.0.2
docker==7.1.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mysql-connector-python==8.0.29
packaging @ file:///croot/packaging_1734472117206/work
paramiko==3.5.1
peewee==3.17.9
pluggy @ file:///croot/pluggy_1733169602837/work
portalocker==3.1.1
protobuf==6.30.2
pycparser==2.22
pyfakefs==5.8.0
PyNaCl==1.5.0
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
requests==2.32.3
sshtunnel==0.4.0
tabulate==0.9.0
ujson==5.10.0
urllib3==2.3.0
watchdog==6.0.0
| name: alpenhorn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py311h06a4308_0
- pip=25.0=py311h06a4308_0
- pluggy=1.5.0=py311h06a4308_0
- pytest=8.3.4=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alpenhorn==2.0.0a1
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chimedb==24.8.0.post2+git.d82f48eb
- click==8.1.8
- concurrent-log-handler==0.9.25
- cryptography==44.0.2
- docker==7.1.0
- idna==3.10
- mysql-connector-python==8.0.29
- paramiko==3.5.1
- peewee==3.17.9
- portalocker==3.1.1
- protobuf==6.30.2
- pycparser==2.22
- pyfakefs==5.8.0
- pynacl==1.5.0
- pyyaml==6.0.2
- requests==2.32.3
- sshtunnel==0.4.0
- tabulate==0.9.0
- ujson==5.10.0
- urllib3==2.3.0
- watchdog==6.0.0
prefix: /opt/conda/envs/alpenhorn
| [
"tests/cli/acq/test_create.py::test_bad_name",
"tests/cli/file/test_create.py::test_bad_name",
"tests/common/test_util.py::test_invalid_import_path",
"tests/daemon/test_update_node.py::test_update_import_invalid_path",
"tests/daemon/test_update_node.py::test_update_import_outtree_scan",
"tests/daemon/test_update_node.py::test_update_import_unresolve_scan"
] | [
"tests/cli/file/test_create.py::test_file_not_file",
"tests/cli/file/test_create.py::test_file_access",
"tests/cli/file/test_create.py::test_from_file",
"tests/cli/file/test_create.py::test_from_file_relprefix",
"tests/cli/file/test_create.py::test_from_file_absprefix",
"tests/daemon/test_auto_import.py::test_import_file_not_ready",
"tests/daemon/test_auto_import.py::test_import_file_locked",
"tests/daemon/test_auto_import.py::test_import_file_create",
"tests/daemon/test_auto_import.py::test_import_file_no_register",
"tests/daemon/test_auto_import.py::test_import_file_callback",
"tests/daemon/test_auto_import.py::test_import_file_exists",
"tests/daemon/test_auto_import.py::test_import_file_missing",
"tests/daemon/test_auto_import.py::test_scan_new",
"tests/daemon/test_auto_import.py::test_scan_exists",
"tests/daemon/test_auto_import.py::test_scan_file"
] | [
"tests/cli/acq/test_create.py::test_create",
"tests/cli/acq/test_create.py::test_create_existing",
"tests/cli/file/test_create.py::test_no_data",
"tests/cli/file/test_create.py::test_negative_size",
"tests/cli/file/test_create.py::test_bad_md5",
"tests/cli/file/test_create.py::test_prefix_no_scan",
"tests/cli/file/test_create.py::test_data_clash",
"tests/cli/file/test_create.py::test_no_acq",
"tests/cli/file/test_create.py::test_exists",
"tests/cli/file/test_create.py::test_with_data",
"tests/cli/file/test_create.py::test_file_not_found",
"tests/common/test_util.py::test_run_retval0",
"tests/common/test_util.py::test_run_retval1",
"tests/common/test_util.py::test_run_stdout",
"tests/common/test_util.py::test_run_stderr",
"tests/common/test_util.py::test_run_timeout",
"tests/common/test_util.py::test_md5sum_file",
"tests/common/test_util.py::test_gethostname_config",
"tests/common/test_util.py::test_gethostname_default",
"tests/common/test_util.py::test_pretty_bytes",
"tests/common/test_util.py::test_pretty_deltat",
"tests/daemon/test_auto_import.py::test_import_file_bad_paths",
"tests/daemon/test_auto_import.py::test_import_file_queue",
"tests/daemon/test_auto_import.py::test_import_file_no_ext",
"tests/daemon/test_auto_import.py::test_import_file_no_detect",
"tests/daemon/test_auto_import.py::test_import_file_invalid_acqname",
"tests/daemon/test_auto_import.py::test_empty_stop_observers",
"tests/daemon/test_auto_import.py::test_update_observers_start",
"tests/daemon/test_auto_import.py::test_update_observers_stop",
"tests/daemon/test_auto_import.py::test_update_observers_force_stop",
"tests/daemon/test_update_node.py::test_bad_ioclass",
"tests/daemon/test_update_node.py::test_reinit",
"tests/daemon/test_update_node.py::test_bad_ioconfig",
"tests/daemon/test_update_node.py::test_idle",
"tests/daemon/test_update_node.py::test_check_init",
"tests/daemon/test_update_node.py::test_update_do_init",
"tests/daemon/test_update_node.py::test_update_free_space",
"tests/daemon/test_update_node.py::test_auto_verify",
"tests/daemon/test_update_node.py::test_auto_verify_time",
"tests/daemon/test_update_node.py::test_update_idle",
"tests/daemon/test_update_node.py::test_update_delete_under_min",
"tests/daemon/test_update_node.py::test_update_delete_over_min",
"tests/daemon/test_update_node.py::test_update_delete_transfer_pending",
"tests/daemon/test_update_node.py::test_update_delete_bad_file",
"tests/daemon/test_update_node.py::test_update_node_run",
"tests/daemon/test_update_node.py::test_update_import_absolute",
"tests/daemon/test_update_node.py::test_update_import_no_recurse",
"tests/daemon/test_update_node.py::test_update_import_scan",
"tests/daemon/test_update_node.py::test_update_import_alpenhorn_node"
] | [] | MIT License | 20,347 | 1,599 | [
"alpenhorn/cli/acq/create.py",
"alpenhorn/cli/file/create.py",
"alpenhorn/common/util.py",
"alpenhorn/daemon/auto_import.py",
"alpenhorn/daemon/update.py"
] |
tobymao__sqlglot-4459 | 73825d2d11cefaabd2ed73c3dcb9184393b6c042 | 2024-11-28 13:14:16 | 3945acc4a0dfd58147de929c9a2c71734d8f1ade | diff --git a/sqlglot/dialects/bigquery.py b/sqlglot/dialects/bigquery.py
index 2f3ac53e..ec8188bb 100644
--- a/sqlglot/dialects/bigquery.py
+++ b/sqlglot/dialects/bigquery.py
@@ -319,7 +319,7 @@ def _build_format_time(expr_type: t.Type[exp.Expression]) -> t.Callable[[t.List]
def _build_contains_substring(args: t.List) -> exp.Contains | exp.Anonymous:
if len(args) == 3:
- return exp.Anonymous(this="CONTAINS_SUBSTRING", expressions=args)
+ return exp.Anonymous(this="CONTAINS_SUBSTR", expressions=args)
# Lowercase the operands in case of transpilation, as exp.Contains
# is case-sensitive on other dialects
@@ -492,7 +492,7 @@ class BigQuery(Dialect):
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
- "CONTAINS_SUBSTRING": _build_contains_substring,
+ "CONTAINS_SUBSTR": _build_contains_substring,
"DATE": _build_date,
"DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
"DATE_SUB": build_date_delta_with_interval(exp.DateSub),
@@ -1219,4 +1219,4 @@ class BigQuery(Dialect):
this = this.this
expr = expr.this
- return self.func("CONTAINS_SUBSTRING", this, expr)
+ return self.func("CONTAINS_SUBSTR", this, expr)
diff --git a/sqlglot/dialects/presto.py b/sqlglot/dialects/presto.py
index 056d974c..a87a5fce 100644
--- a/sqlglot/dialects/presto.py
+++ b/sqlglot/dialects/presto.py
@@ -376,6 +376,7 @@ class Presto(Dialect):
exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
exp.CurrentTime: lambda *_: "CURRENT_TIME",
exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
+ exp.CurrentUser: lambda *_: "CURRENT_USER",
exp.DateAdd: _date_delta_sql("DATE_ADD"),
exp.DateDiff: lambda self, e: self.func(
"DATE_DIFF", unit_to_str(e), e.expression, e.this
| Trino CURRENT_USER does not allow parenthesis
```python
from sqlglot import parse_one
from sqlglot.dialects import Trino
sql_in = parse_one("SELECT CURRENT_USER", dialect=Trino)
print(sql_in.sql(dialect=Trino))
```
```bash
SELECT CURRENT_USER()
```
https://prestodb.io/docs/current/functions/session.html
https://trino.io/docs/current/functions/session.html
I believe that the Postgres dialect already implements the desired workaround for this function
https://github.com/tobymao/sqlglot/blob/v25.32.1/sqlglot/dialects/postgres.py#L527 | tobymao/sqlglot | diff --git a/tests/dialects/test_bigquery.py b/tests/dialects/test_bigquery.py
index 26b12a15..09f7673c 100644
--- a/tests/dialects/test_bigquery.py
+++ b/tests/dialects/test_bigquery.py
@@ -1608,11 +1608,11 @@ WHERE
)
self.validate_identity(
- "CONTAINS_SUBSTRING(a, b, json_scope => 'JSON_KEYS_AND_VALUES')"
+ "CONTAINS_SUBSTR(a, b, json_scope => 'JSON_KEYS_AND_VALUES')"
).assert_is(exp.Anonymous)
self.validate_all(
- """CONTAINS_SUBSTRING(a, b)""",
+ """CONTAINS_SUBSTR(a, b)""",
read={
"": "CONTAINS(a, b)",
"spark": "CONTAINS(a, b)",
@@ -1628,7 +1628,7 @@ WHERE
"snowflake": "CONTAINS(LOWER(a), LOWER(b))",
"duckdb": "CONTAINS(LOWER(a), LOWER(b))",
"oracle": "CONTAINS(LOWER(a), LOWER(b))",
- "bigquery": "CONTAINS_SUBSTRING(a, b)",
+ "bigquery": "CONTAINS_SUBSTR(a, b)",
},
)
diff --git a/tests/dialects/test_presto.py b/tests/dialects/test_presto.py
index c0870895..f39a0d5e 100644
--- a/tests/dialects/test_presto.py
+++ b/tests/dialects/test_presto.py
@@ -1071,6 +1071,18 @@ class TestPresto(Validator):
"databricks": "REGEXP_EXTRACT('abc', '(a)(b)(c)', 0)",
},
)
+ self.validate_all(
+ "CURRENT_USER",
+ read={
+ "presto": "CURRENT_USER",
+ "trino": "CURRENT_USER",
+ "snowflake": "CURRENT_USER()", # Although the ANSI standard is CURRENT_USER
+ },
+ write={
+ "trino": "CURRENT_USER",
+ "snowflake": "CURRENT_USER()",
+ },
+ )
def test_encode_decode(self):
self.validate_identity("FROM_UTF8(x, y)")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 25.32 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@73825d2d11cefaabd2ed73c3dcb9184393b6c042#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_bigquery.py::TestBigQuery::test_bigquery",
"tests/dialects/test_presto.py::TestPresto::test_presto"
] | [] | [
"tests/dialects/test_bigquery.py::TestBigQuery::test_convert",
"tests/dialects/test_bigquery.py::TestBigQuery::test_errors",
"tests/dialects/test_bigquery.py::TestBigQuery::test_format_temporal",
"tests/dialects/test_bigquery.py::TestBigQuery::test_gap_fill",
"tests/dialects/test_bigquery.py::TestBigQuery::test_group_concat",
"tests/dialects/test_bigquery.py::TestBigQuery::test_inline_constructor",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_extract_array",
"tests/dialects/test_bigquery.py::TestBigQuery::test_json_object",
"tests/dialects/test_bigquery.py::TestBigQuery::test_merge",
"tests/dialects/test_bigquery.py::TestBigQuery::test_mod",
"tests/dialects/test_bigquery.py::TestBigQuery::test_models",
"tests/dialects/test_bigquery.py::TestBigQuery::test_null_ordering",
"tests/dialects/test_bigquery.py::TestBigQuery::test_pushdown_cte_column_names",
"tests/dialects/test_bigquery.py::TestBigQuery::test_range_type",
"tests/dialects/test_bigquery.py::TestBigQuery::test_regexp_extract",
"tests/dialects/test_bigquery.py::TestBigQuery::test_remove_precision_parameterized_types",
"tests/dialects/test_bigquery.py::TestBigQuery::test_rename_table",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unix_seconds",
"tests/dialects/test_bigquery.py::TestBigQuery::test_unnest",
"tests/dialects/test_bigquery.py::TestBigQuery::test_user_defined_functions",
"tests/dialects/test_bigquery.py::TestBigQuery::test_warnings",
"tests/dialects/test_presto.py::TestPresto::test_cast",
"tests/dialects/test_presto.py::TestPresto::test_ddl",
"tests/dialects/test_presto.py::TestPresto::test_encode_decode",
"tests/dialects/test_presto.py::TestPresto::test_hex_unhex",
"tests/dialects/test_presto.py::TestPresto::test_interval_plural_to_singular",
"tests/dialects/test_presto.py::TestPresto::test_json",
"tests/dialects/test_presto.py::TestPresto::test_json_vs_row_extract",
"tests/dialects/test_presto.py::TestPresto::test_match_recognize",
"tests/dialects/test_presto.py::TestPresto::test_quotes",
"tests/dialects/test_presto.py::TestPresto::test_regex",
"tests/dialects/test_presto.py::TestPresto::test_signum",
"tests/dialects/test_presto.py::TestPresto::test_time",
"tests/dialects/test_presto.py::TestPresto::test_to_char",
"tests/dialects/test_presto.py::TestPresto::test_unicode_string",
"tests/dialects/test_presto.py::TestPresto::test_unnest"
] | [] | MIT License | 20,354 | 547 | [
"sqlglot/dialects/bigquery.py",
"sqlglot/dialects/presto.py"
] |
|
borgbackup__borg-8575 | 26b2ffc8a0135df18a6b97facae23e495bbe7c99 | 2024-11-29 20:10:12 | 4344e64436db61e8a35bfc5515720aadb2d3dad7 | ThomasWaldmann: BTW, don't add `coverage.*` files to the repo, these are temporary files.
alighazi288: Got it, I’ll ensure temporary files are omitted moving forward. Could you please share more details or context about the errors above? This would help me better understand and address the issue.
ThomasWaldmann: The failing `test_return_codes` test is because `borg extract` is used there and expected to finish with rc 0, but it crashes and returns rc 2.
alighazi288: Changed the code and now everything seems to be running as expected at my end and **borg extract** finishes with rc 0. Just wanted to ask if the errors could be because of me working on MacOS and not having pyfuse3?
ThomasWaldmann: The errors are unrelated to macOS and not having pyfuse3 - I also work on a macbook.
If one want to test or work on the mount / FUSE code in borg, one would need macFUSE installed and use llfuse (not pyfuse3). But this is not required here.
alighazi288: @ThomasWaldmann Please review.
ThomasWaldmann: Can you rather rebase on master please than merge master in here?
alighazi288: I wanted to let you know that there appears to be a large number of changes in this pull request because I encountered some issues while attempting to rebase on master. I apologize for any confusion this may have caused.
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/borgbackup/borg/pull/8575?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) Report
Attention: Patch coverage is `92.59259%` with `2 lines` in your changes missing coverage. Please review.
> Project coverage is 81.39%. Comparing base [(`694fa93`)](https://app.codecov.io/gh/borgbackup/borg/commit/694fa93b761a11f5a3f9b5a9e8227eabea2cb9e1?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) to head [(`8404970`)](https://app.codecov.io/gh/borgbackup/borg/commit/8404970c5324c47bee6e804bef76140f4bf31f63?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup).
> Report is 4 commits behind head on master.
| [Files with missing lines](https://app.codecov.io/gh/borgbackup/borg/pull/8575?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) | Patch % | Lines |
|---|---|---|
| [src/borg/archiver/extract\_cmd.py](https://app.codecov.io/gh/borgbackup/borg/pull/8575?src=pr&el=tree&filepath=src%2Fborg%2Farchiver%2Fextract_cmd.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvYXJjaGl2ZXIvZXh0cmFjdF9jbWQucHk=) | 92.59% | [2 Missing :warning: ](https://app.codecov.io/gh/borgbackup/borg/pull/8575?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) |
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## master #8575 +/- ##
==========================================
- Coverage 81.84% 81.39% -0.45%
==========================================
Files 74 74
Lines 13311 13292 -19
Branches 1960 1959 -1
==========================================
- Hits 10894 10819 -75
- Misses 1755 1801 +46
- Partials 662 672 +10
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/borgbackup/borg/pull/8575?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup).
ThomasWaldmann: There is still that ".coverage 2" file in your PR, you need to remove it, it is a temporary file and must not be committed. | diff --git a/src/borg/archiver/extract_cmd.py b/src/borg/archiver/extract_cmd.py
index d0fafc1d..c7fc4b08 100644
--- a/src/borg/archiver/extract_cmd.py
+++ b/src/borg/archiver/extract_cmd.py
@@ -56,33 +56,50 @@ def do_extract(self, args, repository, manifest, archive):
else:
pi = None
- for item in archive.iter_items(filter):
- archive.preload_item_chunks(item, optimize_hardlinks=True)
+ for item in archive.iter_items():
orig_path = item.path
if strip_components:
- item.path = os.sep.join(orig_path.split(os.sep)[strip_components:])
- if not args.dry_run:
- while dirs and not item.path.startswith(dirs[-1].path):
- dir_item = dirs.pop(-1)
- try:
- archive.extract_item(dir_item, stdout=stdout)
- except BackupError as e:
- self.print_warning_instance(BackupWarning(remove_surrogates(dir_item.path), e))
+ stripped_path = os.sep.join(orig_path.split(os.sep)[strip_components:])
+ if not stripped_path:
+ continue
+ item.path = stripped_path
+
+ is_matched = matcher.match(orig_path)
+
if output_list:
- logging.getLogger("borg.output.list").info(remove_surrogates(item.path))
- try:
- if dry_run:
- archive.extract_item(item, dry_run=True, hlm=hlm, pi=pi)
- else:
- if stat.S_ISDIR(item.mode):
- dirs.append(item)
- archive.extract_item(item, stdout=stdout, restore_attrs=False)
+ log_prefix = "+" if is_matched else "-"
+ logging.getLogger("borg.output.list").info(f"{log_prefix} {remove_surrogates(item.path)}")
+
+ if is_matched:
+ archive.preload_item_chunks(item, optimize_hardlinks=True)
+
+ if not dry_run:
+ while dirs and not item.path.startswith(dirs[-1].path):
+ dir_item = dirs.pop(-1)
+ try:
+ archive.extract_item(dir_item, stdout=stdout)
+ except BackupError as e:
+ self.print_warning_instance(BackupWarning(remove_surrogates(dir_item.path), e))
+
+ try:
+ if dry_run:
+ archive.extract_item(item, dry_run=True, hlm=hlm, pi=pi)
else:
- archive.extract_item(
- item, stdout=stdout, sparse=sparse, hlm=hlm, pi=pi, continue_extraction=continue_extraction
- )
- except BackupError as e:
- self.print_warning_instance(BackupWarning(remove_surrogates(orig_path), e))
+ if stat.S_ISDIR(item.mode):
+ dirs.append(item)
+ archive.extract_item(item, stdout=stdout, restore_attrs=False)
+ else:
+ archive.extract_item(
+ item,
+ stdout=stdout,
+ sparse=sparse,
+ hlm=hlm,
+ pi=pi,
+ continue_extraction=continue_extraction,
+ )
+ except BackupError as e:
+ self.print_warning_instance(BackupWarning(remove_surrogates(orig_path), e))
+
if pi:
pi.finish()
| No dry-run flags when using borg extract
Using borg 1.4, I checked the use of "flags" when doing a dry-run for different borg commands.
The command `borg extract --dry-run --list` does not print the flags (that are used with e.g. borg create / borg recreate
Instead, `borg extract` prints only the list of items that would be included in the extract without the --dry-run.
This was the command, I used:
`borg extract REPO::ARCHIVE /home/user/test Admin -e 'home/user/test/*.bak' --dry-run --verbose --list `
In the name of consistency, dry-run flags could be added to borg extract.
| borgbackup/borg | diff --git a/src/borg/testsuite/archiver/extract_cmd_test.py b/src/borg/testsuite/archiver/extract_cmd_test.py
index b4f8d317..a6dd9632 100644
--- a/src/borg/testsuite/archiver/extract_cmd_test.py
+++ b/src/borg/testsuite/archiver/extract_cmd_test.py
@@ -718,3 +718,22 @@ def test_extract_continue(archivers, request):
assert f.read() == CONTENTS2
with open("input/file3", "rb") as f:
assert f.read() == CONTENTS3
+
+
+def test_dry_run_extraction_flags(archivers, request):
+ archiver = request.getfixturevalue(archivers)
+ cmd(archiver, "repo-create", RK_ENCRYPTION)
+ create_regular_file(archiver.input_path, "file1", 0)
+ create_regular_file(archiver.input_path, "file2", 0)
+ create_regular_file(archiver.input_path, "file3", 0)
+ cmd(archiver, "create", "test", "input")
+
+ output = cmd(archiver, "extract", "--dry-run", "--list", "test", "-e", "input/file3")
+
+ expected_output = ["+ input/file1", "+ input/file2", "- input/file3"]
+ output_lines = output.splitlines()
+ for expected in expected_output:
+ assert expected in output_lines, f"Expected line not found: {expected}"
+ print(output)
+
+ assert not os.listdir("output"), "Output directory should be empty after dry-run"
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-cov",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc pkg-config build-essential libssl-dev libacl1-dev liblz4-dev libzstd-dev libxxhash-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.d/development.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
-e git+https://github.com/borgbackup/borg.git@26b2ffc8a0135df18a6b97facae23e495bbe7c99#egg=borgbackup
borghash==0.1.0
borgstore==0.1.0
build==1.2.2.post1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
Cython==3.0.12
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
msgpack==1.1.0
nodeenv==1.9.1
packaging==24.2
pkgconfig==1.5.5
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
py-cpuinfo==9.0.0
pycparser==2.22
pyproject-api==1.9.0
pyproject_hooks==1.2.0
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-xdist==3.6.1
PyYAML==6.0.2
requests==2.32.3
setuptools-scm==8.2.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: borg
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- borgbackup==2.0.0b15.dev19+g26b2ffc8
- borghash==0.1.0
- borgstore==0.1.0
- build==1.2.2.post1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cython==3.0.12
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- msgpack==1.1.0
- nodeenv==1.9.1
- packaging==24.2
- pkgconfig==1.5.5
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- py-cpuinfo==9.0.0
- pycparser==2.22
- pyproject-api==1.9.0
- pyproject-hooks==1.2.0
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- requests==2.32.3
- setuptools-scm==8.2.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/borg
| [
"src/borg/testsuite/archiver/extract_cmd_test.py::test_dry_run_extraction_flags[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_dry_run_extraction_flags[remote_archiver]"
] | [] | [
"src/borg/testsuite/archiver/extract_cmd_test.py::test_symlink_extract[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_symlink_extract[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_hardlinked_symlinks_extract[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_hardlinked_symlinks_extract[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps1[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps1[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps2[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps2[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps3[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_directory_timestamps3[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_atime[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_atime[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_sparse_file[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_sparse_file[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_unusual_filenames[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_unusual_filenames[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_strip_components[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_strip_components[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks1[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks1[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks2[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks2[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks_twice[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_hardlinks_twice[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude_regex[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude_regex[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude_regex_from_file[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_include_exclude_regex_from_file[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_with_pattern[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_with_pattern[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_list_output[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_list_output[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_progress[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_progress[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_pattern_opt[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_pattern_opt[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_overwrite[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_overwrite[remote_archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_continue[archiver]",
"src/borg/testsuite/archiver/extract_cmd_test.py::test_extract_continue[remote_archiver]"
] | [] | BSD License | 20,366 | 727 | [
"src/borg/archiver/extract_cmd.py"
] |
materialsvirtuallab__monty-728 | a3d35a63d911127a2bb0a8331f3b6265d249151c | 2024-11-30 23:15:21 | a3d35a63d911127a2bb0a8331f3b6265d249151c | diff --git a/src/monty/json.py b/src/monty/json.py
index d0626d3..7674ccf 100644
--- a/src/monty/json.py
+++ b/src/monty/json.py
@@ -992,7 +992,13 @@ def jsanitize(
if recursive_msonable:
try:
- return obj.as_dict()
+ return jsanitize(
+ obj.as_dict(),
+ strict=strict,
+ allow_bson=allow_bson,
+ enum_values=enum_values,
+ recursive_msonable=recursive_msonable,
+ )
except AttributeError:
pass
| [Bug]: The `jsanitize` function does not behave as expected with `recursive_msonable=True`
### Email (Optional)
_No response_
### Version
2024.10.21
### Which OS(es) are you using?
- [ ] MacOS
- [X] Windows
- [X] Linux
### What happened?
Currently, `jsanitize(recursive_msonable=True)` does not (as the name might suggest) recursively crawl an MSONable object and make it fully sanitized. If an `MSONable` object is within an attribute of the parent `MSONable` object, the former will not be sanitized.
For instance, try the following:
```python
from monty.json import jsanitize
from pymatgen.core import Structure
from copy import deepcopy
structure = Structure(
lattice=[[0, 2.13, 2.13], [2.13, 0, 2.13], [2.13, 2.13, 0]],
species=["Mg", "O"],
coords=[[0, 0, 0], [0.5, 0.5, 0.5]],
)
structure.properties = {"test": deepcopy(structure)}
sanitized = jsanitize(structure, recursive_msonable=True)
```
You will see the output is as follows:
```python
{'@module': 'pymatgen.core.structure',
'@class': 'Structure',
'charge': 0,
'lattice': {'matrix': [[0.0, 2.13, 2.13],
[2.13, 0.0, 2.13],
[2.13, 2.13, 0.0]],
'pbc': (True, True, True),
'a': 3.012274887854692,
'b': 3.012274887854692,
'c': 3.012274887854692,
'alpha': 60.00000000000001,
'beta': 60.00000000000001,
'gamma': 60.00000000000001,
'volume': 19.327193999999995},
'properties': {'test': Structure Summary
Lattice
abc : 3.012274887854692 3.012274887854692 3.012274887854692
angles : 60.00000000000001 60.00000000000001 60.00000000000001
volume : 19.327193999999995
A : 0.0 2.13 2.13
B : 2.13 0.0 2.13
C : 2.13 2.13 0.0
pbc : True True True
PeriodicSite: Mg (0.0, 0.0, 0.0) [0.0, 0.0, 0.0]
PeriodicSite: O (2.13, 2.13, 2.13) [0.5, 0.5, 0.5]},
'sites': [{'species': [{'element': 'Mg', 'occu': 1}],
'abc': [0.0, 0.0, 0.0],
'properties': {},
'label': 'Mg',
'xyz': [0.0, 0.0, 0.0]},
{'species': [{'element': 'O', 'occu': 1}],
'abc': [0.5, 0.5, 0.5],
'properties': {},
'label': 'O',
'xyz': [2.13, 2.13, 2.13]}]}
```
Note how the `properties` key has an actual `Structure` object in it.
### Code snippet
_No response_
### Log output
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct | materialsvirtuallab/monty | diff --git a/tests/test_json.py b/tests/test_json.py
index e59e89d..e91e6ef 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -794,6 +794,18 @@ class TestJson:
assert clean_recursive_msonable["hello"][1] == "test"
assert clean_recursive_msonable["test"] == "hi"
+ DoubleGoodMSONClass = GoodMSONClass(1, 2, 3)
+ DoubleGoodMSONClass.values = [GoodMSONClass(1, 2, 3)]
+ clean_recursive_msonable = jsanitize(
+ DoubleGoodMSONClass, recursive_msonable=True
+ )
+ assert clean_recursive_msonable["a"] == 1
+ assert clean_recursive_msonable["b"] == 2
+ assert clean_recursive_msonable["c"] == 3
+ assert clean_recursive_msonable["values"][0]["a"] == 1
+ assert clean_recursive_msonable["values"][0]["b"] == 2
+ assert clean_recursive_msonable["values"][0]["c"] == 3
+
d = {"dt": datetime.datetime.now()}
clean = jsanitize(d)
assert isinstance(clean["dt"], str)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2024.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=8",
"pytest-cov>=4",
"types-requests",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
coverage==7.8.0
decorator==5.2.1
exceptiongroup==1.2.2
executing==2.2.0
iniconfig==2.1.0
ipython==8.18.1
jedi==0.19.2
matplotlib-inline==0.1.7
-e git+https://github.com/materialsvirtuallab/monty.git@a3d35a63d911127a2bb0a8331f3b6265d249151c#egg=monty
numpy==1.26.4
packaging==24.2
parso==0.8.4
pexpect==4.9.0
pluggy==1.5.0
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
stack-data==0.6.3
tomli==2.2.1
traitlets==5.14.3
types-requests==2.32.0.20250328
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
| name: monty
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- coverage==7.8.0
- decorator==5.2.1
- exceptiongroup==1.2.2
- executing==2.2.0
- iniconfig==2.1.0
- ipython==8.18.1
- jedi==0.19.2
- matplotlib-inline==0.1.7
- monty==2024.10.21
- numpy==1.26.4
- packaging==24.2
- parso==0.8.4
- pexpect==4.9.0
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- stack-data==0.6.3
- tomli==2.2.1
- traitlets==5.14.3
- types-requests==2.32.0.20250328
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/monty
| [
"tests/test_json.py::TestJson::test_jsanitize"
] | [] | [
"tests/test_json.py::TestMSONable::test_to_from_dict",
"tests/test_json.py::TestMSONable::test_kw_only_args",
"tests/test_json.py::TestMSONable::test_unsafe_hash",
"tests/test_json.py::TestMSONable::test_version",
"tests/test_json.py::TestMSONable::test_nested_to_from_dict",
"tests/test_json.py::TestMSONable::test_enum_serialization",
"tests/test_json.py::TestMSONable::test_enum_serialization_no_msonable",
"tests/test_json.py::TestMSONable::test_save_load",
"tests/test_json.py::TestJson::test_as_from_dict",
"tests/test_json.py::TestJson::test_datetime",
"tests/test_json.py::TestJson::test_uuid",
"tests/test_json.py::TestJson::test_path",
"tests/test_json.py::TestJson::test_nan",
"tests/test_json.py::TestJson::test_numpy",
"tests/test_json.py::TestJson::test_callable",
"tests/test_json.py::TestJson::test_redirect",
"tests/test_json.py::TestJson::test_redirect_settings_file",
"tests/test_json.py::TestJson::test_dataclass",
"tests/test_json.py::TestJson::test_enum"
] | [] | MIT License | 20,371 | 154 | [
"src/monty/json.py"
] |
|
tefra__xsdata-1098 | 6e3f505bf9ed57d6209dad5afc4a7bdf90bc5fe8 | 2024-12-01 04:37:13 | 474b072cd05c2f7368ad1680aff7515b9d009f0a | sonarcloud[bot]: ## [](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098) **Quality Gate passed**
Issues
 [0 New issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0 Accepted issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=ACCEPTED)
Measures
 [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_coverage&view=list)
 [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_duplicated_lines_density&view=list)
[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098)
sonarcloud[bot]: ## [](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098) **Quality Gate passed**
Issues
 [0 New issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0 Accepted issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=ACCEPTED)
Measures
 [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_coverage&view=list)
 [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_duplicated_lines_density&view=list)
[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098)
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/tefra/xsdata/pull/1098?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 99.95%. Comparing base [(`6e3f505`)](https://app.codecov.io/gh/tefra/xsdata/commit/6e3f505bf9ed57d6209dad5afc4a7bdf90bc5fe8?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) to head [(`360ba1a`)](https://app.codecov.io/gh/tefra/xsdata/commit/360ba1afc855690c3f243c0e183bf55c2b7d22e1?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #1098 +/- ##
===========================================
- Coverage 100.00% 99.95% -0.05%
===========================================
Files 115 115
Lines 9279 9280 +1
Branches 1417 1417
===========================================
- Hits 9279 9276 -3
- Misses 0 3 +3
- Partials 0 1 +1
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/tefra/xsdata/pull/1098?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
sonarcloud[bot]: ## [](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098) **Quality Gate passed**
Issues
 [0 New issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0 Accepted issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1098&issueStatuses=ACCEPTED)
Measures
 [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=1098&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_coverage&view=list)
 [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1098&metric=new_duplicated_lines_density&view=list)
[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1098)
| diff --git a/xsdata/formats/dataclass/filters.py b/xsdata/formats/dataclass/filters.py
index 574a47b7..ed05d150 100644
--- a/xsdata/formats/dataclass/filters.py
+++ b/xsdata/formats/dataclass/filters.py
@@ -256,6 +256,9 @@ class Filters:
if attr.fixed or attr.is_prohibited:
kwargs["init"] = False
+ if attr.is_prohibited:
+ kwargs[self.DEFAULT_KEY] = None
+
if default_value is not False and not attr.is_prohibited:
key = self.FACTORY_KEY if attr.is_factory else self.DEFAULT_KEY
kwargs[key] = default_value
| xsdata seems to have an issue with metadata = {"type": "ignore"}
[testcase.zip](https://github.com/user-attachments/files/17680506/testcase.zip)
Running the code below result into.
```
This works
WheelchairAccess(value=<LimitationStatusEnumeration.FALSE: 'false'>)
This fails
Told you so...
Traceback (most recent call last):
File "/tmp/xsdata_bug.py", line 22, in <module>
print(site_frame.stop_places.stop_place[0].accessibility_assessment.limitations.accessibility_limitation)
File "/usr/lib/python3.12/dataclasses.py", line 262, in wrapper
result = user_function(self)
^^^^^^^^^^^^^^^^^^^
File "<string>", line 3, in __repr__
AttributeError: 'AccessibilityLimitation' object has no attribute 'validity_conditions_or_valid_between'
```
Removing the attributes with type=ignore, the code functions as expected.
```python
@dataclass(kw_only=True)
class AccessibilityLimitation(AccessibilityLimitationVersionedChildStructure):
class Meta:
namespace = "http://www.netex.org.uk/netex"
validity_conditions_or_valid_between: Any = field(
init=False,
metadata={
"type": "Ignore",
},
)
alternative_texts: Any = field(
init=False,
metadata={
"type": "Ignore",
},
)
```
```python
from pathlib import Path
from xsdata.formats.dataclass.context import XmlContext
from xsdata.formats.dataclass.parsers import XmlParser
from xsdata.formats.dataclass.parsers.config import ParserConfig
from xsdata.formats.dataclass.parsers.handlers import LxmlEventHandler
from netex import PublicationDelivery, CompositeFrame, SiteFrame
context = XmlContext()
config = ParserConfig(fail_on_unknown_properties=False)
parser = XmlParser(context=context, config=config, handler=LxmlEventHandler)
pd = parser.parse(Path("/tmp/smaller.xml"), PublicationDelivery)
composite_frame: CompositeFrame = pd.data_objects.choice[0]
site_frame: SiteFrame = composite_frame.frames.common_frame[1]
print("This works")
print(site_frame.stop_places.stop_place[0].accessibility_assessment.limitations.accessibility_limitation.wheelchair_access)
print("This fails")
try:
print(site_frame.stop_places.stop_place[0].accessibility_assessment.limitations.accessibility_limitation)
except:
print("Told you so...")
raise
print("Therefore this fails too")
try:
print(site_frame.stop_places.stop_place[0])
except:
print("Told you so...")
raise
``` | tefra/xsdata | diff --git a/tests/formats/dataclass/test_filters.py b/tests/formats/dataclass/test_filters.py
index eeb4ea31..ba6c1dbd 100644
--- a/tests/formats/dataclass/test_filters.py
+++ b/tests/formats/dataclass/test_filters.py
@@ -281,6 +281,7 @@ class FiltersTests(FactoryTestCase):
expected = (
"field(\n"
" init=False,\n"
+ " default=None,\n"
" metadata={\n"
' "type": "Ignore",\n'
" }\n"
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 24.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,lxml,soap]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-benchmark",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-default-group==1.2.4
coverage==7.8.0
docformatter==1.7.5
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==3.0.2
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
py-cpuinfo==9.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-benchmark==5.1.0
pytest-cov==6.0.0
requests==2.32.3
ruff==0.11.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
toposort==1.10
typing_extensions==4.13.0
untokenize==0.1.1
urllib3==2.3.0
-e git+https://github.com/tefra/xsdata.git@6e3f505bf9ed57d6209dad5afc4a7bdf90bc5fe8#egg=xsdata
| name: xsdata
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-default-group==1.2.4
- coverage==7.8.0
- docformatter==1.7.5
- idna==3.10
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==3.0.2
- py-cpuinfo==9.0.0
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- requests==2.32.3
- ruff==0.11.2
- toposort==1.10
- typing-extensions==4.13.0
- untokenize==0.1.1
- urllib3==2.3.0
- xsdata==24.11
prefix: /opt/conda/envs/xsdata
| [
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_definition_with_prohibited_attr"
] | [] | [
"tests/formats/dataclass/test_filters.py::FiltersTests::test__init",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_apply_substitutions_with_regexes",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_build_class_annotation",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type_with_circular_reference",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type_with_forward_reference",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type_with_list_types_are_ignored",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type_with_multiple_types",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_choice_type_with_restrictions_tokens_true",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_class_annotations",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_class_bases",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_class_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_constant_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_constant_value",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_combo",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_annotations",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_builtin_datatype",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_dataclasses",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_decimal",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_enum",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_module",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_qname",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_default_imports_with_typing",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_choices",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_any_attribute",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_array_type",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_multiple_types",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_bool",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_decimal",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_enum",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_float",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_int",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_qname",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_str",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_type_tokens",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_value_none",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_default_value_with_xml_duration",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_definition",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_definition_with_restriction_pattern",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_definition_without_metadata",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_choices",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_mixed",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_namespace",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_restrictions",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_metadata_wrapper",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_alias",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_any_attribute",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_array_type",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_circular_reference",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_compound_attr",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_default_value",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_forward_reference",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_multiple_types",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_native_type",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_optional_value",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_prohibited_attr",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_field_type_with_token_attr",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_format_metadata",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_import_module",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_module_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_package_name",
"tests/formats/dataclass/test_filters.py::FiltersTests::test_type_name"
] | [] | MIT License | 20,373 | 171 | [
"xsdata/formats/dataclass/filters.py"
] |
tefra__xsdata-1099 | 95755fe11e253b38ee1df5112880a0529203a15f | 2024-12-01 06:02:54 | 474b072cd05c2f7368ad1680aff7515b9d009f0a | sonarcloud[bot]: ## [](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1099) **Quality Gate passed**
Issues
 [0 New issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1099&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0 Accepted issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1099&issueStatuses=ACCEPTED)
Measures
 [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=1099&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1099&metric=new_coverage&view=list)
 [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1099&metric=new_duplicated_lines_density&view=list)
[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1099)
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/tefra/xsdata/pull/1099?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 99.75%. Comparing base [(`95755fe`)](https://app.codecov.io/gh/tefra/xsdata/commit/95755fe11e253b38ee1df5112880a0529203a15f?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) to head [(`31c22a5`)](https://app.codecov.io/gh/tefra/xsdata/commit/31c22a51c3b15459c4aec592b8ff54ac0aaa9b32?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #1099 +/- ##
===========================================
- Coverage 100.00% 99.75% -0.25%
===========================================
Files 115 115
Lines 9281 9283 +2
Branches 1418 1419 +1
===========================================
- Hits 9281 9260 -21
- Misses 0 19 +19
- Partials 0 4 +4
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/tefra/xsdata/pull/1099?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
| diff --git a/xsdata/codegen/mappers/definitions.py b/xsdata/codegen/mappers/definitions.py
index 1a5f8a88..c3402c38 100644
--- a/xsdata/codegen/mappers/definitions.py
+++ b/xsdata/codegen/mappers/definitions.py
@@ -259,6 +259,9 @@ class DefinitionsMapper:
),
)
+ for attr in body.attrs:
+ attr.restrictions.min_occurs = 0
+
@classmethod
def build_envelope_class(
cls,
| SOAP Body sometimes generated with mandatory `fault` field
**Context**
I am currently investigating if I can use xsdata for consuming a SOAP service that I have to integrate with. So far, I see a lot of good stuff. I'm using xsdata-pydantic, and am very happy with the generated Pydantic schema classes. I was quickly able to build a request to send using these.
**Issue**
Parsing the response using the built-in Client fails. When using `client.send(...)`, validation fails on the output message that is returned from the service:
```
1 validation error for Body
fault
Field required [type=missing, input_value={'can_response': CanRespo...cel.'), request_uid='')}, input_type=dict]
For further information visit https://errors.pydantic.dev/2.9/v/missing
```
It tells me that the `fault` field is required, but the service does not return a fault element.
**My current work-around**
I found out that the broken behavior is related to `kw_only = True` (see my updated in this thread). I was able to get SOAP calls working by:
- not using `pydantic` but `dataclasses` as the format (because `pydantic` enforced `kw_only = True`;
- not using `xsdata --kw-only` for generating the schema classes.
| tefra/xsdata | diff --git a/tests/codegen/mappers/test_definitions.py b/tests/codegen/mappers/test_definitions.py
index 123059be..12de60b5 100644
--- a/tests/codegen/mappers/test_definitions.py
+++ b/tests/codegen/mappers/test_definitions.py
@@ -504,6 +504,9 @@ class DefinitionsMapperTests(FactoryTestCase):
self.assertEqual(expected_fault_attr, body.attrs[0])
self.assertEqual(expected_fault_attrs, body.inner[0].attrs)
+ for attr in body.attrs:
+ self.assertTrue(attr.is_optional)
+
def test_build_envelope_fault_with_detail_messages(self):
body = ClassFactory.create(qname="Body")
target = ClassFactory.create()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 24.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,lxml,soap]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-benchmark pytest-cov",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-default-group==1.2.4
coverage==7.8.0
docformatter==1.7.5
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==3.0.2
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
py-cpuinfo==9.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-benchmark==5.1.0
pytest-cov==6.0.0
requests==2.32.3
ruff==0.11.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
toposort==1.10
typing_extensions==4.13.0
untokenize==0.1.1
urllib3==2.3.0
-e git+https://github.com/tefra/xsdata.git@95755fe11e253b38ee1df5112880a0529203a15f#egg=xsdata
| name: xsdata
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-default-group==1.2.4
- coverage==7.8.0
- docformatter==1.7.5
- idna==3.10
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==3.0.2
- py-cpuinfo==9.0.0
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- requests==2.32.3
- ruff==0.11.2
- toposort==1.10
- typing-extensions==4.13.0
- untokenize==0.1.1
- urllib3==2.3.0
- xsdata==24.11
prefix: /opt/conda/envs/xsdata
| [
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_envelope_fault"
] | [] | [
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_attributes",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_envelope_class",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_envelope_class_with_style_rpc",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_envelope_fault_raises_error_if_missing_inner_body",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_envelope_fault_with_detail_messages",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_message_class",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_build_parts_attributes",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_get_or_create_inner_class",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_message_parts_with_all_parts",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_message_parts_with_original_message",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_message_parts_with_part_token",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_message_parts_with_token_parts",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_operation",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_operation_messages",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_binding_operation_messages_with_style_rpc",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_port",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_map_port_type_message",
"tests/codegen/mappers/test_definitions.py::DefinitionsMapperTests::test_operation_namespace"
] | [] | MIT License | 20,374 | 138 | [
"xsdata/codegen/mappers/definitions.py"
] |
scikit-hep__awkward-3322 | 7b150aab6b56b24e24b47a8fc92904fa1984d8dc | 2024-12-02 18:44:49 | 62a9e6b8580e43bba14d140c846f9b9741e49007 | diff --git a/src/awkward/_broadcasting.py b/src/awkward/_broadcasting.py
index ae8f1f81..9c4b51c5 100644
--- a/src/awkward/_broadcasting.py
+++ b/src/awkward/_broadcasting.py
@@ -860,7 +860,7 @@ def apply_step(
if not isinstance(xyc, Content):
unmasked.append(xyc)
masks.append(
- NumpyArray(backend.nplike.zeros(len(inputs[2]), dtype=np.int8))
+ NumpyArray(backend.nplike.zeros(inputs[2].length, dtype=np.int8))
)
elif not xyc.is_option:
unmasked.append(xyc)
| ak.where unexpectedly fails on typetracers of option types
### Version of Awkward Array
2.7.1
### Description and code to reproduce
The following code:
```python3
import json
import awkward as ak
fromdict = {'class': 'RecordArray', 'fields': ['muon', 'jet'], 'contents': [{'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'RecordArray', 'fields': ['pt', 'eta', 'phi', 'crossref'], 'contents': [{'class': 'NumpyArray', 'pr\
imitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': "muon_pt!"}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': "muon_eta!"}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_\
shape': [], 'parameters': {}, 'form_key': "muon_phi!"}, {"class": "ListOffsetArray", "offsets": "i64", "content": {"class": "NumpyArray", "primitive": "int64", "inner_shape": [], "parameters": {}, "form_key": "muon_crossref_content!"}, "parame\
ters": {}, "form_key": "muon_crossref_index!"}], 'parameters': {}, 'form_key': "muon_record!"}, 'parameters': {}, 'form_key': "muon_list!"}, {'class': 'ListOffsetArray', 'offsets': 'i64', 'content': {'class': 'RecordArray', 'fields': ['pt', 'e\
ta', 'phi', "crossref", "thing1"], 'contents': [{'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': "jet_pt!"}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form\
_key': "jet_eta!"}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': "jet_phi!"}, {"class": "ListOffsetArray", "offsets": "i64", "content": {"class": "NumpyArray", "primitive": "int64", "inner_shap\
e": [], "parameters": {}, "form_key": "jet_crossref_content!"}, "parameters": {}, "form_key": "jet_crossref_index!"}, {'class': 'NumpyArray', 'primitive': 'int64', 'inner_shape': [], 'parameters': {}, 'form_key': "jet_thing1!"}], 'parameters':\
{}, 'form_key': "jet_record!"}, 'parameters': {}, 'form_key': "jet_list!"}], 'parameters': {}, 'form_key': "outer!"}
form = ak.forms.from_dict(fromdict)
ttlayout, report = ak.typetracer.typetracer_with_report(form)
ttarray = ak.Array(ttlayout)
maybe = ak.firsts(ttarray)
varmaybe = ak.pad_none(ttarray, 3)
where = ak.where(abs(ttarray.jet.eta) < 1., 0.000511, ttarray.jet.thing1)
varmaybe_where = ak.where(abs(varmaybe.jet.eta) < 1., 0.000511, varmaybe.jet.thing1)
maybe_where = ak.where(abs(maybe.jet.eta) < 1., 0.000511, maybe.jet.thing1)
```
Fails with:
```
Traceback (most recent call last):
File "/Users/lgray/coffea-dev/coffea/form_madness_again.py", line 18, in <module>
varmaybe_where = ak.where(abs(varmaybe.jet.eta) < 1., 0.000511, varmaybe.jet.thing1)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_dispatch.py", line 64, in dispatch
next(gen_or_result)
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/operations/ak_where.py", line 65, in where
return _impl3(condition, x, y, mergebool, highlevel, behavior, attrs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/operations/ak_where.py", line 130, in _impl3
out = ak._broadcasting.broadcast_and_apply(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1200, in broadcast_and_apply
out = apply_step(
^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1178, in apply_step
return continuation()
^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1147, in continuation
return broadcast_any_list()
^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 671, in broadcast_any_list
outcontent = apply_step(
^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1178, in apply_step
return continuation()
^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1147, in continuation
return broadcast_any_list()
^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 747, in broadcast_any_list
outcontent = apply_step(
^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1178, in apply_step
return continuation()
^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 1141, in continuation
return broadcast_any_option_akwhere()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_broadcasting.py", line 863, in broadcast_any_option_akwhere
NumpyArray(backend.nplike.zeros(len(inputs[2]), dtype=np.int8))
^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/contents/content.py", line 272, in __len__
return int(self.length)
^^^^^^^^^^^^^^^^
File "/Users/lgray/miniforge3/envs/coffea-dev/lib/python3.12/site-packages/awkward/_nplikes/shape.py", line 79, in __int__
raise TypeError("cannot interpret unknown lengths as concrete values")
TypeError: cannot interpret unknown lengths as concrete values
This error occurred while calling
ak.where(
<Array-typetracer [...] type='## * var * ?bool'>
0.000511
<Array-typetracer [...] type='## * var * ?int64'>
)
``` | scikit-hep/awkward | diff --git a/tests/test_3321_akwhere_typetracer_lengths_optiontypes.py b/tests/test_3321_akwhere_typetracer_lengths_optiontypes.py
new file mode 100644
index 00000000..6c7e2343
--- /dev/null
+++ b/tests/test_3321_akwhere_typetracer_lengths_optiontypes.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import awkward as ak
+
+fromdict = {
+ "class": "RecordArray",
+ "fields": ["muon", "jet"],
+ "contents": [
+ {
+ "class": "ListOffsetArray",
+ "offsets": "i64",
+ "content": {
+ "class": "RecordArray",
+ "fields": ["pt", "eta", "phi", "crossref"],
+ "contents": [
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "muon_pt!",
+ },
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "muon_eta!",
+ },
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "muon_phi!",
+ },
+ {
+ "class": "ListOffsetArray",
+ "offsets": "i64",
+ "content": {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "muon_crossref_content!",
+ },
+ "parameters": {},
+ "form_key": "muon_crossref_index!",
+ },
+ ],
+ "parameters": {},
+ "form_key": "muon_record!",
+ },
+ "parameters": {},
+ "form_key": "muon_list!",
+ },
+ {
+ "class": "ListOffsetArray",
+ "offsets": "i64",
+ "content": {
+ "class": "RecordArray",
+ "fields": [
+ "pt",
+ "eta",
+ "phi",
+ "crossref",
+ "thing1",
+ ],
+ "contents": [
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "jet_pt!",
+ },
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "jet_eta!",
+ },
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "jet_phi!",
+ },
+ {
+ "class": "ListOffsetArray",
+ "offsets": "i64",
+ "content": {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "jet_crossref_content!",
+ },
+ "parameters": {},
+ "form_key": "jet_crossref_index!",
+ },
+ {
+ "class": "NumpyArray",
+ "primitive": "int64",
+ "inner_shape": [],
+ "parameters": {},
+ "form_key": "jet_thing1!",
+ },
+ ],
+ "parameters": {},
+ "form_key": "jet_record!",
+ },
+ "parameters": {},
+ "form_key": "jet_list!",
+ },
+ ],
+ "parameters": {},
+ "form_key": "outer!",
+}
+
+form = ak.forms.from_dict(fromdict)
+ttlayout, report = ak.typetracer.typetracer_with_report(form)
+ttarray = ak.Array(ttlayout)
+
+
+def test_where():
+ ak.where(abs(ttarray.jet.eta) < 1.0, 0.000511, ttarray.jet.thing1)
+
+
+def test_maybe_where():
+ maybe = ak.firsts(ttarray)
+ ak.where(abs(maybe.jet.eta) < 1.0, 0.000511, maybe.jet.thing1)
+
+
+def test_varmaybe_where():
+ varmaybe = ak.pad_none(ttarray, 3)
+ ak.where(abs(varmaybe.jet.eta) < 1.0, 0.000511, varmaybe.jet.thing1)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"uproot"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-test-full.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/scikit-hep/awkward.git@7b150aab6b56b24e24b47a8fc92904fa1984d8dc#egg=awkward
awkward_cpp==42
coverage==7.8.0
cramjam==2.9.1
exceptiongroup==1.2.2
execnet==2.1.1
fsspec==2025.3.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jax==0.4.30
jaxlib==0.4.30
llvmlite==0.43.0
ml_dtypes==0.5.1
numba==0.60.0
numexpr==2.10.2
numpy==2.0.2
opt_einsum==3.4.0
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pyarrow==16.0.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
uproot==5.6.0
xxhash==3.5.0
zipp==3.21.0
| name: awkward
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- awkward==2.7.1
- awkward-cpp==42
- coverage==7.8.0
- cramjam==2.9.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- fsspec==2025.3.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jax==0.4.30
- jaxlib==0.4.30
- llvmlite==0.43.0
- ml-dtypes==0.5.1
- numba==0.60.0
- numexpr==2.10.2
- numpy==2.0.2
- opt-einsum==3.4.0
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pyarrow==16.0.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
- uproot==5.6.0
- xxhash==3.5.0
- zipp==3.21.0
prefix: /opt/conda/envs/awkward
| [
"tests/test_3321_akwhere_typetracer_lengths_optiontypes.py::test_maybe_where",
"tests/test_3321_akwhere_typetracer_lengths_optiontypes.py::test_varmaybe_where"
] | [] | [
"tests/test_3321_akwhere_typetracer_lengths_optiontypes.py::test_where"
] | [] | BSD 3-Clause "New" or "Revised" License | 20,380 | 167 | [
"src/awkward/_broadcasting.py"
] |
|
AgentOps-AI__AgentStack-77 | c2725af63fefa393169f30be0689f2b4f3f0e4b3 | 2024-12-02 18:51:11 | c2725af63fefa393169f30be0689f2b4f3f0e4b3 | diff --git a/agentstack/cli/__init__.py b/agentstack/cli/__init__.py
index 3c35ec3..afd42af 100644
--- a/agentstack/cli/__init__.py
+++ b/agentstack/cli/__init__.py
@@ -1,1 +1,1 @@
-from .cli import init_project_builder, list_tools
+from .cli import init_project_builder, list_tools, configure_default_model
diff --git a/agentstack/cli/cli.py b/agentstack/cli/cli.py
index 9b560d1..f10866b 100644
--- a/agentstack/cli/cli.py
+++ b/agentstack/cli/cli.py
@@ -16,10 +16,18 @@ from cookiecutter.main import cookiecutter
from .agentstack_data import FrameworkData, ProjectMetadata, ProjectStructure, CookiecutterData
from agentstack.logger import log
from agentstack.utils import get_package_path
+from agentstack.generation.files import ConfigFile
from agentstack.generation.tool_generation import get_all_tools
from .. import generation
from ..utils import open_json_file, term_color, is_snake_case
+PREFERRED_MODELS = [
+ 'openai/gpt-4o',
+ 'anthropic/claude-3-5-sonnet',
+ 'openai/o1-preview',
+ 'openai/gpt-4-turbo',
+ 'anthropic/claude-3-opus',
+]
def init_project_builder(slug_name: Optional[str] = None, template: Optional[str] = None, use_wizard: bool = False):
if slug_name and not is_snake_case(slug_name):
@@ -114,6 +122,27 @@ def welcome_message():
print(border)
+def configure_default_model(path: Optional[str] = None):
+ """Set the default model"""
+ agentstack_config = ConfigFile(path)
+ if agentstack_config.default_model:
+ return # Default model already set
+
+ print("Project does not have a default model configured.")
+ other_msg = f"Other (enter a model name)"
+ model = inquirer.list_input(
+ message="Which model would you like to use?",
+ choices=PREFERRED_MODELS + [other_msg],
+ )
+
+ if model == other_msg: # If the user selects "Other", prompt for a model name
+ print(f'A list of available models is available at: "https://docs.litellm.ai/docs/providers"')
+ model = inquirer.text(message="Enter the model name")
+
+ with ConfigFile(path) as agentstack_config:
+ agentstack_config.default_model = model
+
+
def ask_framework() -> str:
framework = "CrewAI"
# framework = inquirer.list_input(
diff --git a/agentstack/generation/agent_generation.py b/agentstack/generation/agent_generation.py
index f13a5d9..bf64dd2 100644
--- a/agentstack/generation/agent_generation.py
+++ b/agentstack/generation/agent_generation.py
@@ -2,6 +2,7 @@ from typing import Optional, List
from .gen_utils import insert_code_after_tag, get_crew_components, CrewComponent
from agentstack.utils import verify_agentstack_project, get_framework
+from agentstack.generation.files import ConfigFile
import os
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import FoldedScalarString
@@ -14,6 +15,7 @@ def generate_agent(
backstory: Optional[str],
llm: Optional[str]
):
+ agentstack_config = ConfigFile() # TODO path
if not role:
role = 'Add your role here'
if not goal:
@@ -21,7 +23,7 @@ def generate_agent(
if not backstory:
backstory = 'Add your backstory here'
if not llm:
- llm = 'openai/gpt-4o'
+ llm = agentstack_config.default_model
verify_agentstack_project()
@@ -37,9 +39,6 @@ def generate_agent(
print(f"Added agent \"{name}\" to your AgentStack project successfully!")
-
-
-
def generate_crew_agent(
name,
role: Optional[str] = 'Add your role here',
diff --git a/agentstack/generation/files.py b/agentstack/generation/files.py
index 0fc1fb1..b1c226c 100644
--- a/agentstack/generation/files.py
+++ b/agentstack/generation/files.py
@@ -31,10 +31,13 @@ class ConfigFile(BaseModel):
A list of tools that are currently installed in the project.
telemetry_opt_out: Optional[bool]
Whether the user has opted out of telemetry.
+ default_model: Optional[str]
+ The default model to use when generating agent configurations.
"""
framework: Optional[str] = DEFAULT_FRAMEWORK
tools: list[str] = []
telemetry_opt_out: Optional[bool] = None
+ default_model: Optional[str] = None
def __init__(self, path: Union[str, Path, None] = None):
path = Path(path) if path else Path.cwd()
diff --git a/agentstack/main.py b/agentstack/main.py
index 14a448c..77a7ed7 100644
--- a/agentstack/main.py
+++ b/agentstack/main.py
@@ -2,7 +2,7 @@ import argparse
import os
import sys
-from agentstack.cli import init_project_builder, list_tools
+from agentstack.cli import init_project_builder, list_tools, configure_default_model
from agentstack.telemetry import track_cli_command
from agentstack.utils import get_version, get_framework
import agentstack.generation as generation
@@ -102,6 +102,8 @@ def main():
os.system('python src/main.py')
elif args.command in ['generate', 'g']:
if args.generate_command in ['agent', 'a']:
+ if not args.llm:
+ configure_default_model()
generation.generate_agent(args.name, args.role, args.goal, args.backstory, args.llm)
elif args.generate_command in ['task', 't']:
generation.generate_task(args.name, args.description, args.expected_output, args.agent)
| Dynamically load model providers
In the agent wizard section of the CLI, it asks to enter the model and provider for the agent to use.
Any provider/model that works in LiteLLM should be accepted.
Import or create a list of all acceptable providers and associated models.
In the AgentWizard, ask the user to select the provider (show top 5 most common, then an other option that shows all).
After selecting the provider, ask them which model.
The associated provider/model should be stored in the agent datatype as a string with format `provider/model` | AgentOps-AI/AgentStack | diff --git a/tests/test_generation_files.py b/tests/test_generation_files.py
index 8f8549e..e2d80d7 100644
--- a/tests/test_generation_files.py
+++ b/tests/test_generation_files.py
@@ -14,6 +14,7 @@ class GenerationFilesTest(unittest.TestCase):
assert config.framework == "crewai"
assert config.tools == ["tool1", "tool2"]
assert config.telemetry_opt_out is None
+ assert config.default_model is None
def test_write_config(self):
try:
@@ -25,6 +26,7 @@ class GenerationFilesTest(unittest.TestCase):
config.framework = "crewai"
config.tools = ["tool1", "tool2"]
config.telemetry_opt_out = True
+ config.default_model = "openai/gpt-4o"
tmp_data = open(BASE_PATH/"tmp/agentstack.json").read()
assert tmp_data == """{
@@ -33,7 +35,8 @@ class GenerationFilesTest(unittest.TestCase):
"tool1",
"tool2"
],
- "telemetry_opt_out": true
+ "telemetry_opt_out": true,
+ "default_model": "openai/gpt-4o"
}"""
except Exception as e:
raise e
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 5
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/AgentOps-AI/AgentStack.git@c2725af63fefa393169f30be0689f2b4f3f0e4b3#egg=agentstack
annotated-types==0.7.0
arrow==1.3.0
art==6.4
astor==0.8.1
binaryornot==0.4.4
blessed==1.20.0
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
cookiecutter==2.6.0
editor==1.6.6
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
inquirer==3.4.0
Jinja2==3.1.6
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
packaging==24.2
pluggy==1.5.0
psutil==5.9.0
pydantic==2.11.1
pydantic_core==2.33.0
Pygments==2.19.1
pytest==8.3.5
python-dateutil==2.9.0.post0
python-slugify==8.0.4
PyYAML==6.0.2
readchar==4.2.1
requests==2.32.3
rich==14.0.0
ruamel.yaml==0.18.10
ruamel.yaml.base==0.3.2
ruamel.yaml.clib==0.2.12
runs==1.2.2
shellingham==1.5.4
six==1.17.0
text-unidecode==1.3
toml==0.10.2
tomli==2.2.1
typer==0.15.2
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
xmod==1.8.1
| name: AgentStack
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- agentstack==0.2.0
- annotated-types==0.7.0
- arrow==1.3.0
- art==6.4
- astor==0.8.1
- binaryornot==0.4.4
- blessed==1.20.0
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- cookiecutter==2.6.0
- editor==1.6.6
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- inquirer==3.4.0
- jinja2==3.1.6
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mdurl==0.1.2
- packaging==24.2
- pluggy==1.5.0
- psutil==5.9.0
- pydantic==2.11.1
- pydantic-core==2.33.0
- pygments==2.19.1
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-slugify==8.0.4
- pyyaml==6.0.2
- readchar==4.2.1
- requests==2.32.3
- rich==14.0.0
- ruamel-yaml==0.18.10
- ruamel-yaml-base==0.3.2
- ruamel-yaml-clib==0.2.12
- runs==1.2.2
- shellingham==1.5.4
- six==1.17.0
- text-unidecode==1.3
- toml==0.10.2
- tomli==2.2.1
- typer==0.15.2
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- urllib3==2.3.0
- wcwidth==0.2.13
- xmod==1.8.1
prefix: /opt/conda/envs/AgentStack
| [
"tests/test_generation_files.py::GenerationFilesTest::test_read_config",
"tests/test_generation_files.py::GenerationFilesTest::test_write_config"
] | [] | [
"tests/test_generation_files.py::GenerationFilesTest::test_get_framework",
"tests/test_generation_files.py::GenerationFilesTest::test_get_telemetry_opt_out",
"tests/test_generation_files.py::GenerationFilesTest::test_read_env",
"tests/test_generation_files.py::GenerationFilesTest::test_read_missing_config",
"tests/test_generation_files.py::GenerationFilesTest::test_verify_agentstack_project_invalid",
"tests/test_generation_files.py::GenerationFilesTest::test_verify_agentstack_project_valid",
"tests/test_generation_files.py::GenerationFilesTest::test_write_env"
] | [] | MIT License | 20,381 | 1,428 | [
"agentstack/cli/__init__.py",
"agentstack/cli/cli.py",
"agentstack/generation/agent_generation.py",
"agentstack/generation/files.py",
"agentstack/main.py"
] |
|
vyperlang__vyper-4385 | c8691ac5dd95623991e51205bc90a720fc513766 | 2024-12-03 02:47:46 | f444c8fa3b02f34181cfa8768bcf572aedc29659 | diff --git a/vyper/semantics/analysis/utils.py b/vyper/semantics/analysis/utils.py
index a31ce7ac..8727f375 100644
--- a/vyper/semantics/analysis/utils.py
+++ b/vyper/semantics/analysis/utils.py
@@ -41,7 +41,7 @@ def _validate_op(node, types_list, validation_fn_name):
try:
_validate_fn(node)
ret.append(type_)
- except InvalidOperation as e:
+ except (InvalidOperation, OverflowException) as e:
err_list.append(e)
if ret:
diff --git a/vyper/semantics/types/primitives.py b/vyper/semantics/types/primitives.py
index 5c0362e6..dcc4fe8c 100644
--- a/vyper/semantics/types/primitives.py
+++ b/vyper/semantics/types/primitives.py
@@ -173,11 +173,11 @@ class NumericT(_PrimT):
if isinstance(left, vy_ast.Int):
if left.value >= 2**value_bits:
raise OverflowException(
- "Base is too large, calculation will always overflow", left
+ f"Base is too large for {self}, calculation will always overflow", left
)
elif left.value < -(2**value_bits):
raise OverflowException(
- "Base is too small, calculation will always underflow", left
+ f"Base is too small for {self}, calculation will always underflow", left
)
elif isinstance(right, vy_ast.Int):
if right.value < 0:
| incorrectly reported overflow exception
### Version Information
* vyper Version (output of `vyper --version`): 0.4.0
### What's your issue about?
```vyper
@external
def foo() -> uint256:
return (10**18)**2
```
panics at compilation with the following error, which is clearly wrong given that 10**36 clearly fits in a uint256:
```
vyper.exceptions.OverflowException: Base is too large, calculation will always overflow
contract "contracts/token.vy:3", function "foo", line 3:12
2 def foo() -> uint256:
---> 3 return (10**18)**2
-------------------^
```
| vyperlang/vyper | diff --git a/tests/functional/codegen/types/numbers/test_exponents.py b/tests/functional/codegen/types/numbers/test_exponents.py
index 702cbcb1..28dba59e 100644
--- a/tests/functional/codegen/types/numbers/test_exponents.py
+++ b/tests/functional/codegen/types/numbers/test_exponents.py
@@ -173,3 +173,17 @@ def foo(b: int128) -> int128:
c.foo(max_power)
with tx_failed():
c.foo(max_power + 1)
+
+
+valid_list = [
+ """
+@external
+def foo() -> uint256:
+ return (10**18)**2
+ """
+]
+
+
[email protected]("good_code", valid_list)
+def test_exponent_success(good_code):
+ assert compile_code(good_code) is not None
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [],
"python": "3.10",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
altgraph==0.17.4
annotated-types==0.7.0
asttokens==2.4.1
async-timeout==5.0.1
attrs==25.3.0
backports.tarfile==1.2.0
bitarray==3.3.0
black==23.12.0
cached-property==2.0.1
cbor2==5.6.5
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
ckzg==2.1.0
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
cytoolz==1.0.1
decorator==5.2.1
distlib==0.3.9
docutils==0.21.2
eth-account==0.12.2
eth-bloom==3.1.0
eth-hash==0.7.1
eth-keyfile==0.9.1
eth-keys==0.6.1
eth-rlp==2.2.0
eth-stdlib==0.2.7
eth-typing==5.2.0
eth-utils==5.2.0
eth_abi==5.2.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
executing==2.2.0
filelock==3.18.0
flake8==6.1.0
flake8-bugbear==23.12.2
flake8-use-fstring==1.4
frozenlist==1.5.0
hexbytes==1.3.0
hypothesis==6.130.5
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
ipython==8.34.0
isort==5.13.2
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jedi==0.19.2
jeepney==0.9.0
keyring==25.6.0
lark==1.1.9
lru-dict==1.3.0
markdown-it-py==3.0.0
matplotlib-inline==0.1.7
mccabe==0.7.0
mdurl==0.1.2
more-itertools==10.6.0
multidict==6.2.0
mypy==1.5.0
mypy-extensions==1.0.0
nh3==0.2.21
nodeenv==1.9.1
packaging==23.2
parsimonious==0.10.0
parso==0.8.4
pathspec==0.12.1
pexpect==4.9.0
pkginfo==1.12.1.2
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
prompt_toolkit==3.0.50
propcache==0.3.1
ptyprocess==0.7.0
pure_eval==0.2.3
py-ecc==7.0.1
py-evm==0.10.1b2
pycodestyle==2.11.1
pycparser==2.22
pycryptodome==3.22.0
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.1.0
Pygments==2.19.1
pyinstaller==6.12.0
pyinstaller-hooks-contrib==2025.2
pyrevm==0.3.3
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==4.1.0
pytest-instafail==0.5.0
pytest-split==0.10.0
pytest-xdist==3.3.1
PyYAML==6.0.2
readme_renderer==44.0
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
rlp==4.1.0
SecretStorage==3.3.3
six==1.17.0
sortedcontainers==2.4.0
stack-data==0.6.3
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
toolz==1.0.0
traitlets==5.14.3
trie==3.1.0
twine==6.0.1
typing-inspection==0.4.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
-e git+https://github.com/vyperlang/vyper.git@c8691ac5dd95623991e51205bc90a720fc513766#egg=vyper
wcwidth==0.2.13
yarl==1.18.3
zipp==3.21.0
| name: vyper
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- pytest=8.3.4=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py310h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- altgraph==0.17.4
- annotated-types==0.7.0
- asttokens==2.4.1
- async-timeout==5.0.1
- attrs==25.3.0
- backports-tarfile==1.2.0
- bitarray==3.3.0
- black==23.12.0
- cached-property==2.0.1
- cbor2==5.6.5
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- ckzg==2.1.0
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- cytoolz==1.0.1
- decorator==5.2.1
- distlib==0.3.9
- docutils==0.21.2
- eth-abi==5.2.0
- eth-account==0.12.2
- eth-bloom==3.1.0
- eth-hash==0.7.1
- eth-keyfile==0.9.1
- eth-keys==0.6.1
- eth-rlp==2.2.0
- eth-stdlib==0.2.7
- eth-typing==5.2.0
- eth-utils==5.2.0
- execnet==2.1.1
- executing==2.2.0
- filelock==3.18.0
- flake8==6.1.0
- flake8-bugbear==23.12.2
- flake8-use-fstring==1.4
- frozenlist==1.5.0
- hexbytes==1.3.0
- hypothesis==6.130.5
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- ipython==8.34.0
- isort==5.13.2
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jedi==0.19.2
- jeepney==0.9.0
- keyring==25.6.0
- lark==1.1.9
- lru-dict==1.3.0
- markdown-it-py==3.0.0
- matplotlib-inline==0.1.7
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- multidict==6.2.0
- mypy==1.5.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- nodeenv==1.9.1
- packaging==23.2
- parsimonious==0.10.0
- parso==0.8.4
- pathspec==0.12.1
- pexpect==4.9.0
- pkginfo==1.12.1.2
- platformdirs==4.3.7
- pre-commit==4.2.0
- prompt-toolkit==3.0.50
- propcache==0.3.1
- ptyprocess==0.7.0
- pure-eval==0.2.3
- py-ecc==7.0.1
- py-evm==0.10.1b2
- pycodestyle==2.11.1
- pycparser==2.22
- pycryptodome==3.22.0
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.1.0
- pygments==2.19.1
- pyinstaller==6.12.0
- pyinstaller-hooks-contrib==2025.2
- pyrevm==0.3.3
- pytest-cov==4.1.0
- pytest-instafail==0.5.0
- pytest-split==0.10.0
- pytest-xdist==3.3.1
- pyyaml==6.0.2
- readme-renderer==44.0
- regex==2024.11.6
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- rlp==4.1.0
- secretstorage==3.3.3
- six==1.17.0
- sortedcontainers==2.4.0
- stack-data==0.6.3
- toolz==1.0.0
- traitlets==5.14.3
- trie==3.1.0
- twine==6.0.1
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- urllib3==2.3.0
- virtualenv==20.29.3
- vyper==0.4.1b3
- wcwidth==0.2.13
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/vyper
| [
"tests/functional/codegen/types/numbers/test_exponents.py::test_exponent_success[\\n@external\\ndef"
] | [] | [
"tests/functional/codegen/types/numbers/test_exponents.py::test_compiler_hang",
"tests/functional/codegen/types/numbers/test_exponents.py::test_fold_nonliteral",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[2]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[3]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[4]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[5]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[31]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[189]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[7]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[32]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[190]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[70]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[191]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[10]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[33]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[55]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[8]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[11]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[71]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[9]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[192]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[34]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[72]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[73]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[56]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[2]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[77]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[161]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[74]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[3]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[4]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[5]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[95]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[75]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[12]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[13]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[6]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[7]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[8]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[3]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[224]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[225]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[14]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[15]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[57]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[58]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[59]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[16]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[78]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[17]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[18]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[19]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[162]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[163]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[10]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[11]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[12]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[96]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[226]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[193]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[194]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[6]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[35]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[36]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[37]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[133]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[134]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[135]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[136]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[137]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[138]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[97]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[91]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[63]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[64]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[164]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[165]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[4]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[79]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[227]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[65]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[228]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[98]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[42]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[7]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[8]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[126]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[56]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[35]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[238]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[239]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[14]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[140]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[141]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[5]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[60]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[61]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[9]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[80]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[229]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[166]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[99]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[240]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[231]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[105]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[106]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[84]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[43]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[103]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[100]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[119]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[120]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[147]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[71]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[72]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[73]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[63]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[57]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[47]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[142]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[49]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[50]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[44]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[51]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[143]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[58]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[59]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[60]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[127]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[128]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[92]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[93]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[210]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[104]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[39]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[107]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[121]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[252]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[253]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[254]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[48]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[49]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[50]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[129]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[21]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[232]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[130]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[131]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[85]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[40]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[108]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[109]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[110]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[23]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[94]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[95]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[96]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[241]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[52]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[203]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[182]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[183]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[204]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[74]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[184]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[211]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[64]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[212]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[75]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[76]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[242]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[98]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[45]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[119]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[205]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[206]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[51]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[66]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[67]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[68]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[77]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[86]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[24]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[87]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[41]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[65]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[144]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[101]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[213]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[120]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[105]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[36]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[37]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[25]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[66]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[42]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[145]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[233]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[148]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[149]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[67]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[38]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[46]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[52]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[22]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[39]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[234]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[47]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[53]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[26]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[245]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[81]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[121]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[87]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[214]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[23]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[243]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[235]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[122]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[175]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[88]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[88]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[236]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[28]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[185]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[40]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[43]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[106]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[168]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[150]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[82]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[54]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[61]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[207]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[112]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[24]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[89]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[169]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[208]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[107]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[108]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[109]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[122]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[246]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[123]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[124]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[125]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[68]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[215]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[176]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[89]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[151]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[152]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[99]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[100]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[195]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[29]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[25]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[53]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[186]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[187]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[217]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[123]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[69]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[26]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[101]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[102]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[103]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[44]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[27]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[196]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[38]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[62]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[113]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[15]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[247]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[90]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int16[9]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[177]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[30]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[218]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[170]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[197]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[124]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[45]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[178]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[171]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[248]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[28]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[16]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[219]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[91]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[111]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[198]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[2]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[78]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[179]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[10]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[114]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[249]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[220]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[172]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[31]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[29]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[17]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[92]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[199]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[102]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[250]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[32]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[173]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[180]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[200]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[112]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[115]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[93]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[116]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[221]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[113]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[201]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[33]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[117]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[222]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[154]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[126]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[155]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[110]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[14]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[114]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[21]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[22]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[156]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[79]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[70]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[94]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[84]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[85]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[115]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[118]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[83]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[86]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[6]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[34]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[27]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[20]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[13]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[55]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[62]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[48]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[41]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[90]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[76]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[83]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[69]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[202]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[153]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[132]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[244]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[251]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[111]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[160]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[80]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[46]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[209]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[159]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[174]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[104]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[158]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[19]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[30]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[125]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[20]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[54]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[139]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[167]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[188]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[223]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[237]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[18]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[216]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[230]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[118]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[157]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[116]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[181]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[97]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[11]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_uint256[146]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[12]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[13]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[117]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[82]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_exp_int128[81]",
"tests/functional/codegen/types/numbers/test_exponents.py::test_max_exp",
"tests/functional/codegen/types/numbers/test_exponents.py::test_max_exp_int128"
] | [] | Apache License 2.0 | 20,388 | 370 | [
"vyper/semantics/analysis/utils.py",
"vyper/semantics/types/primitives.py"
] |
|
All-Hands-AI__openhands-aci-17 | b551afd07cc9d84ee0322c3334dae3bcd3ee00ea | 2024-12-03 07:52:50 | f9774a3ca86d2ec2430de5dbaef2cf657d48b826 | ryanhoangt: Looks good, the resolve rate is the same as baseline.
```
14:46:18 - openhands:INFO: eval_infer.py:418 - # resolved: 6 / 10. (60.00%)
14:46:18 - openhands:INFO: eval_infer.py:418 - # failed_apply_patch: 0 / 10. (0.00%)
14:46:18 - openhands:INFO: eval_infer.py:418 - # error_eval: 0 / 10. (0.00%)
14:46:18 - openhands:INFO: eval_infer.py:418 - # empty_generation: 0 / 10. (0.00%)
```
Should we cut a release now and raise a PR to fix it in the OH repo, or wait for the navigation commands PR to be merged as well? We may also need to handle the <oh_aci_output> tag added to `main` previously. | diff --git a/openhands_aci/editor/editor.py b/openhands_aci/editor/editor.py
index 0cbb0a1..e98354b 100644
--- a/openhands_aci/editor/editor.py
+++ b/openhands_aci/editor/editor.py
@@ -110,12 +110,17 @@ class OHEditor:
f'No replacement was performed, old_str `{old_str}` did not appear verbatim in {path}.'
)
if occurrences > 1:
- file_content_lines = file_content.split('\n')
- line_numbers = [
- idx + 1
- for idx, line in enumerate(file_content_lines)
- if old_str in line
- ]
+ # Find starting line numbers for each occurrence
+ line_numbers = []
+ start_idx = 0
+ while True:
+ idx = file_content.find(old_str, start_idx)
+ if idx == -1:
+ break
+ # Count newlines before this occurrence to get the line number
+ line_num = file_content.count('\n', 0, idx) + 1
+ line_numbers.append(line_num)
+ start_idx = idx + 1
raise ToolError(
f'No replacement was performed. Multiple occurrences of old_str `{old_str}` in lines {line_numbers}. Please ensure it is unique.'
)
| [Bug]: Editing Error "No replacement was performed" is not informative enough
Cross post from https://github.com/All-Hands-AI/OpenHands/issues/5365 | All-Hands-AI/openhands-aci | diff --git a/tests/integration/test_oh_editor.py b/tests/integration/test_oh_editor.py
index 39b493e..6cdb6d7 100644
--- a/tests/integration/test_oh_editor.py
+++ b/tests/integration/test_oh_editor.py
@@ -149,6 +149,28 @@ def test_str_replace_error_multiple_occurrences(editor):
command='str_replace', path=str(test_file), old_str='test', new_str='sample'
)
assert 'Multiple occurrences of old_str `test`' in str(exc_info.value.message)
+ assert '[1, 2]' in str(exc_info.value.message) # Should show both line numbers
+
+
+def test_str_replace_error_multiple_multiline_occurrences(editor):
+ editor, test_file = editor
+ # Create a file with two identical multi-line blocks
+ multi_block = """def example():
+ print("Hello")
+ return True"""
+ content = f"{multi_block}\n\nprint('separator')\n\n{multi_block}"
+ test_file.write_text(content)
+
+ with pytest.raises(ToolError) as exc_info:
+ editor(
+ command='str_replace',
+ path=str(test_file),
+ old_str=multi_block,
+ new_str='def new():\n print("World")',
+ )
+ error_msg = str(exc_info.value.message)
+ assert 'Multiple occurrences of old_str' in error_msg
+ assert '[1, 7]' in error_msg # Should show correct starting line numbers
def test_str_replace_nonexistent_string(editor):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.12",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.9.0
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
diskcache==5.6.3
distro==1.9.0
filelock==3.18.0
flake8==7.2.0
frozenlist==1.5.0
fsspec==2025.3.1
gitdb==4.0.12
GitPython==3.1.44
grep-ast==0.3.3
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
huggingface-hub==0.30.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
jiter==0.9.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
litellm==1.65.0
MarkupSafe==3.0.2
mccabe==0.7.0
multidict==6.2.0
networkx==3.4.2
numpy==2.2.4
openai==1.69.0
-e git+https://github.com/All-Hands-AI/openhands-aci.git@b551afd07cc9d84ee0322c3334dae3bcd3ee00ea#egg=openhands_aci
packaging==24.2
pandas==2.2.3
pathspec==0.12.1
pluggy==1.5.0
propcache==0.3.1
pycodestyle==2.13.0
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.3.2
pytest==8.3.5
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
rpds-py==0.24.0
scipy==1.15.2
setuptools==75.8.0
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
tiktoken==0.9.0
tokenizers==0.21.1
tqdm==4.67.1
tree-sitter==0.21.3
tree-sitter-languages==1.10.2
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
whatthepatch==1.0.7
wheel==0.45.1
yarl==1.18.3
zipp==3.21.0
| name: openhands-aci
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- expat=2.6.4=h6a678d5_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py312h06a4308_0
- python=3.12.9=h5148396_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py312h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py312h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- annotated-types==0.7.0
- anyio==4.9.0
- attrs==25.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- diskcache==5.6.3
- distro==1.9.0
- filelock==3.18.0
- flake8==7.2.0
- frozenlist==1.5.0
- fsspec==2025.3.1
- gitdb==4.0.12
- gitpython==3.1.44
- grep-ast==0.3.3
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- huggingface-hub==0.30.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- jiter==0.9.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- litellm==1.65.0
- markupsafe==3.0.2
- mccabe==0.7.0
- multidict==6.2.0
- networkx==3.4.2
- numpy==2.2.4
- openai==1.69.0
- openhands-aci==0.1.1
- packaging==24.2
- pandas==2.2.3
- pathspec==0.12.1
- pluggy==1.5.0
- propcache==0.3.1
- pycodestyle==2.13.0
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.3.2
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- regex==2024.11.6
- requests==2.32.3
- rpds-py==0.24.0
- scipy==1.15.2
- six==1.17.0
- smmap==5.0.2
- sniffio==1.3.1
- tiktoken==0.9.0
- tokenizers==0.21.1
- tqdm==4.67.1
- tree-sitter==0.21.3
- tree-sitter-languages==1.10.2
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- whatthepatch==1.0.7
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/openhands-aci
| [
"tests/integration/test_oh_editor.py::test_str_replace_error_multiple_multiline_occurrences"
] | [] | [
"tests/integration/test_oh_editor.py::test_view_file",
"tests/integration/test_oh_editor.py::test_view_directory",
"tests/integration/test_oh_editor.py::test_create_file",
"tests/integration/test_oh_editor.py::test_str_replace_no_linting",
"tests/integration/test_oh_editor.py::test_str_replace_multi_line_no_linting",
"tests/integration/test_oh_editor.py::test_str_replace_multi_line_with_tabs_no_linting",
"tests/integration/test_oh_editor.py::test_str_replace_with_linting",
"tests/integration/test_oh_editor.py::test_str_replace_error_multiple_occurrences",
"tests/integration/test_oh_editor.py::test_str_replace_nonexistent_string",
"tests/integration/test_oh_editor.py::test_insert_no_linting",
"tests/integration/test_oh_editor.py::test_insert_with_linting",
"tests/integration/test_oh_editor.py::test_insert_invalid_line",
"tests/integration/test_oh_editor.py::test_undo_edit",
"tests/integration/test_oh_editor.py::test_validate_path_invalid",
"tests/integration/test_oh_editor.py::test_create_existing_file_error",
"tests/integration/test_oh_editor.py::test_str_replace_missing_old_str",
"tests/integration/test_oh_editor.py::test_str_replace_new_str_and_old_str_same",
"tests/integration/test_oh_editor.py::test_insert_missing_line_param",
"tests/integration/test_oh_editor.py::test_undo_edit_no_history_error"
] | [] | MIT License | 20,391 | 310 | [
"openhands_aci/editor/editor.py"
] |
PEtab-dev__libpetab-python-327 | 0b77d7fb48ef36c579f9748d7df12365c68a1e24 | 2024-12-03 20:40:03 | 9f11e7319b44b991fb0dc3315f1f3441ae9ac5fd | diff --git a/petab/v1/problem.py b/petab/v1/problem.py
index 4a5577e..6145656 100644
--- a/petab/v1/problem.py
+++ b/petab/v1/problem.py
@@ -251,21 +251,28 @@ class Problem:
)
@staticmethod
- def from_yaml(yaml_config: dict | Path | str) -> Problem:
+ def from_yaml(
+ yaml_config: dict | Path | str, base_path: str | Path = None
+ ) -> Problem:
"""
Factory method to load model and tables as specified by YAML file.
Arguments:
yaml_config: PEtab configuration as dictionary or YAML file name
+ base_path: Base directory or URL to resolve relative paths
"""
if isinstance(yaml_config, Path):
yaml_config = str(yaml_config)
- get_path = lambda filename: filename # noqa: E731
if isinstance(yaml_config, str):
- path_prefix = get_path_prefix(yaml_config)
+ if base_path is None:
+ base_path = get_path_prefix(yaml_config)
yaml_config = yaml.load_yaml(yaml_config)
- get_path = lambda filename: f"{path_prefix}/{filename}" # noqa: E731
+
+ def get_path(filename):
+ if base_path is None:
+ return filename
+ return f"{base_path}/{filename}"
if yaml.is_composite_problem(yaml_config):
raise ValueError(
diff --git a/petab/v2/problem.py b/petab/v2/problem.py
index 612f257..4c36d79 100644
--- a/petab/v2/problem.py
+++ b/petab/v2/problem.py
@@ -117,24 +117,31 @@ class Problem:
)
@staticmethod
- def from_yaml(yaml_config: dict | Path | str) -> Problem:
+ def from_yaml(
+ yaml_config: dict | Path | str, base_path: str | Path = None
+ ) -> Problem:
"""
Factory method to load model and tables as specified by YAML file.
Arguments:
yaml_config: PEtab configuration as dictionary or YAML file name
+ base_path: Base directory or URL to resolve relative paths
"""
if isinstance(yaml_config, Path):
yaml_config = str(yaml_config)
if isinstance(yaml_config, str):
yaml_file = yaml_config
- path_prefix = get_path_prefix(yaml_file)
- yaml_config = yaml.load_yaml(yaml_config)
- get_path = lambda filename: f"{path_prefix}/{filename}" # noqa: E731
+ if base_path is None:
+ base_path = get_path_prefix(yaml_file)
+ yaml_config = yaml.load_yaml(yaml_file)
else:
yaml_file = None
- get_path = lambda filename: filename # noqa: E731
+
+ def get_path(filename):
+ if base_path is None:
+ return filename
+ return f"{base_path}/{filename}"
if yaml_config[FORMAT_VERSION] not in {"2.0.0"}:
# If we got a path to a v1 yaml file, try to auto-upgrade
@@ -186,7 +193,7 @@ class Problem:
else None
)
- if len(problem0[MODEL_FILES]) > 1:
+ if len(problem0[MODEL_FILES] or []) > 1:
# TODO https://github.com/PEtab-dev/libpetab-python/issues/6
raise NotImplementedError(
"Support for multiple models is not yet implemented."
| Path management
When generating a `petab.Problem` from a yaml file, all files are automatically prefixed by the location of the yaml file. However when directly passing the corresponding `dict` (potentially after manipulation), it is no longer possible to specify a prefix and the only way for the import to work correctly is to `os.chdir` to the appropriate directory.
Straightforward to add a corresponding command line argument, but I guess it would be generally great to store a prefix path somewhere in the generated `petab.Problem`, as this would make it easier to reference files within the problem (not part of the current spec afaik, but it would be great to have in the future).
Opinions? | PEtab-dev/libpetab-python | diff --git a/tests/v1/test_petab.py b/tests/v1/test_petab.py
index 65700af..1a3f334 100644
--- a/tests/v1/test_petab.py
+++ b/tests/v1/test_petab.py
@@ -862,11 +862,16 @@ def test_problem_from_yaml_v1_multiple_files():
observables_df, Path(tmpdir, f"observables{i}.tsv")
)
- petab_problem = petab.Problem.from_yaml(yaml_path)
+ petab_problem1 = petab.Problem.from_yaml(yaml_path)
- assert petab_problem.measurement_df.shape[0] == 2
- assert petab_problem.observable_df.shape[0] == 2
- assert petab_problem.condition_df.shape[0] == 2
+ # test that we can load the problem from a dict with a custom base path
+ yaml_config = petab.v1.load_yaml(yaml_path)
+ petab_problem2 = petab.Problem.from_yaml(yaml_config, base_path=tmpdir)
+
+ for petab_problem in (petab_problem1, petab_problem2):
+ assert petab_problem.measurement_df.shape[0] == 2
+ assert petab_problem.observable_df.shape[0] == 2
+ assert petab_problem.condition_df.shape[0] == 2
def test_get_required_parameters_for_parameter_table(petab_problem):
diff --git a/tests/v2/test_problem.py b/tests/v2/test_problem.py
index 334dc86..418f781 100644
--- a/tests/v2/test_problem.py
+++ b/tests/v2/test_problem.py
@@ -1,4 +1,19 @@
+import tempfile
+from pathlib import Path
+
+import pandas as pd
+
+import petab.v2 as petab
from petab.v2 import Problem
+from petab.v2.C import (
+ CONDITION_ID,
+ MEASUREMENT,
+ NOISE_FORMULA,
+ OBSERVABLE_FORMULA,
+ OBSERVABLE_ID,
+ SIMULATION_CONDITION_ID,
+ TIME,
+)
def test_load_remote():
@@ -25,3 +40,68 @@ def test_auto_upgrade():
problem = Problem.from_yaml(yaml_url)
# TODO check something specifically different in a v2 problem
assert isinstance(problem, Problem)
+
+
+def test_problem_from_yaml_multiple_files():
+ """Test loading PEtab version 2 yaml with multiple condition / measurement
+ / observable files
+ """
+ yaml_config = """
+ format_version: 2.0.0
+ parameter_file:
+ problems:
+ - condition_files: [conditions1.tsv, conditions2.tsv]
+ measurement_files: [measurements1.tsv, measurements2.tsv]
+ observable_files: [observables1.tsv, observables2.tsv]
+ model_files:
+ """
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ yaml_path = Path(tmpdir, "problem.yaml")
+ with open(yaml_path, "w") as f:
+ f.write(yaml_config)
+
+ for i in (1, 2):
+ condition_df = pd.DataFrame(
+ {
+ CONDITION_ID: [f"condition{i}"],
+ }
+ )
+ condition_df.set_index([CONDITION_ID], inplace=True)
+ petab.write_condition_df(
+ condition_df, Path(tmpdir, f"conditions{i}.tsv")
+ )
+
+ measurement_df = pd.DataFrame(
+ {
+ SIMULATION_CONDITION_ID: [f"condition{i}"],
+ OBSERVABLE_ID: [f"observable{i}"],
+ TIME: [i],
+ MEASUREMENT: [1],
+ }
+ )
+ petab.write_measurement_df(
+ measurement_df, Path(tmpdir, f"measurements{i}.tsv")
+ )
+
+ observables_df = pd.DataFrame(
+ {
+ OBSERVABLE_ID: [f"observable{i}"],
+ OBSERVABLE_FORMULA: [1],
+ NOISE_FORMULA: [1],
+ }
+ )
+ petab.write_observable_df(
+ observables_df, Path(tmpdir, f"observables{i}.tsv")
+ )
+
+ petab_problem1 = petab.Problem.from_yaml(yaml_path)
+
+ # test that we can load the problem from a dict with a custom base path
+ yaml_config = petab.load_yaml(yaml_path)
+ petab_problem2 = petab.Problem.from_yaml(yaml_config, base_path=tmpdir)
+
+ for petab_problem in (petab_problem1, petab_problem2):
+ assert petab_problem.measurement_df.shape[0] == 2
+ assert petab_problem.observable_df.shape[0] == 2
+ assert petab_problem.condition_df.shape[0] == 2
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.10",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | antlr4-python3-runtime==4.13.1
attrs==25.3.0
cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
iniconfig==2.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
mpmath==1.3.0
numpy==2.2.4
packaging==24.2
pandas==2.2.3
-e git+https://github.com/PEtab-dev/libpetab-python.git@0b77d7fb48ef36c579f9748d7df12365c68a1e24#egg=petab
platformdirs==4.3.7
pluggy==1.5.0
pyarrow==19.0.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
python-libsbml==5.20.4
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
rpds-py==0.24.0
six==1.17.0
sympy==1.13.3
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: libpetab-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- antlr4-python3-runtime==4.13.1
- attrs==25.3.0
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- iniconfig==2.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- mpmath==1.3.0
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- petab==0.5.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pyarrow==19.0.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- python-libsbml==5.20.4
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- rpds-py==0.24.0
- six==1.17.0
- sympy==1.13.3
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/libpetab-python
| [
"tests/v1/test_petab.py::test_problem_from_yaml_v1_multiple_files",
"tests/v2/test_problem.py::test_problem_from_yaml_multiple_files"
] | [
"tests/v1/test_petab.py::test_create_parameter_df"
] | [
"tests/v1/test_petab.py::test_split_parameter_replacement_list",
"tests/v1/test_petab.py::test_get_measurement_parameter_ids",
"tests/v1/test_petab.py::test_get_priors_from_df",
"tests/v1/test_petab.py::test_startpoint_sampling",
"tests/v1/test_petab.py::test_startpoint_sampling_dict",
"tests/v1/test_petab.py::test_flatten_timepoint_specific_output_overrides",
"tests/v1/test_petab.py::test_flatten_timepoint_specific_output_overrides_special_cases",
"tests/v1/test_petab.py::test_concat_measurements",
"tests/v1/test_petab.py::test_concat_condition_df",
"tests/v1/test_petab.py::test_to_float_if_float",
"tests/v1/test_petab.py::test_load_remote",
"tests/v1/test_petab.py::test_problem_from_yaml_v1_empty",
"tests/v2/test_problem.py::test_load_remote",
"tests/v2/test_problem.py::test_auto_upgrade"
] | [] | MIT License | 20,396 | 842 | [
"petab/v1/problem.py",
"petab/v2/problem.py"
] |
|
tobymao__sqlglot-4472 | 3945acc4a0dfd58147de929c9a2c71734d8f1ade | 2024-12-04 10:01:55 | 3945acc4a0dfd58147de929c9a2c71734d8f1ade | diff --git a/sqlglot/dialects/postgres.py b/sqlglot/dialects/postgres.py
index ce2daf23..e0f3c1b2 100644
--- a/sqlglot/dialects/postgres.py
+++ b/sqlglot/dialects/postgres.py
@@ -476,6 +476,20 @@ class Postgres(Dialect):
and self.dialect.to_json_path(self._parse_bitwise()),
)
+ def _parse_generated_as_identity(
+ self,
+ ) -> (
+ exp.GeneratedAsIdentityColumnConstraint
+ | exp.ComputedColumnConstraint
+ | exp.GeneratedAsRowColumnConstraint
+ ):
+ this = super()._parse_generated_as_identity()
+
+ if self._match_text_seq("STORED"):
+ this = self.expression(exp.ComputedColumnConstraint, this=this.expression)
+
+ return this
+
class Generator(generator.Generator):
SINGLE_STRING_INTERVAL = True
RENAME_TABLE_WITH_DB = False
@@ -691,3 +705,6 @@ class Postgres(Dialect):
if isinstance(seq_get(exprs, 0), exp.Select)
else f"{self.normalize_func('ARRAY')}[{self.expressions(expression, flat=True)}]"
)
+
+ def computedcolumnconstraint_sql(self, expression: exp.ComputedColumnConstraint) -> str:
+ return f"GENERATED ALWAYS AS ({self.sql(expression, 'this')}) STORED"
| Postgres: Unable to parse STORE Generated Column on create table
# About
[Here's the Postgres docs on this syntax](https://www.postgresql.org/docs/current/ddl-generated-columns.html). It seems I can't parse stored column syntax, I noticed I had a similar issue for syntax for adding columns.
> Note I have tried this with the latest release, but also thank you for creating this incredible library
### Example syntax
```sql
CREATE TABLE IF NOT EXISTS example_2 (
col_a INT NOT NULL,
col_b INT NOT NULL,
col_c INT GENERATED ALWAYS AS (col_a + col_b), -- not supported in postgres
col_d INT GENERATED ALWAYS AS (col_a + col_b) VIRTUAL, -- not supported in postgres
col_e INT GENERATED ALWAYS AS (col_a + col_b) STORED
);
```
<img width="1005" alt="image" src="https://github.com/user-attachments/assets/0983ea6d-70d4-434c-9c45-40e97e8dd5bc">
## Virtual Keyword
As mentioned [here](https://github.com/tobymao/sqlglot/pull/4465#pullrequestreview-2472079771) it likely makes sense to also add support for the `VIRTUAL` keyword here, it is an explicit version of the absence of the default behaviour, it's also mutually exclusive to the `STORED` keyword.
## Dialects
- [MySQL](https://dev.mysql.com/doc/refman/8.4/en/create-table-generated-columns.html) , support for both `stored` and `virtual` keyword
- [SQLite](https://www.sqlite.org/syntax/column-constraint.html) , support for both `stored` and `virtual` keyword
- [Postgres](https://www.postgresql.org/docs/current/ddl-generated-columns.html), support for `stored` but no support for `virtual` columns in tables
- They said _"mate, use a view..."_, to use generated columns it seems you need to use the `stored` keyword
- The postgresql docs say:
> _Thus, a virtual generated column is similar to a view and a stored generated column is similar to a materialized view (except that it is always updated automatically). **PostgreSQL currently implements only stored generated columns**._
- [BigQuery](https://cloud.google.com/spanner/docs/generated-column/how-to), support for `stored` but no `virtual` explicit keyword
- Confusingly they suggest postgres supports this syntax in their docs (maybe they use to?)
### Dialects without support
- [T-SQL](https://learn.microsoft.com/en-us/sql/t-sql/statements/create-table-transact-sql?view=sql-server-ver16#full-syntax)
- [Spark/DB](https://docs.databricks.com/en/delta/generated-columns.html#create-a-table-with-generated-columns)
- [Redshift](https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html)
- [Clickhouse](https://clickhouse.com/docs/en/sql-reference/statements/alter/column#materialize-column)
# The current behaviour
## Reproducible example
```python
from sqlglot import parse
parse("""
CREATE TABLE IF NOT EXISTS example (
col_a INT NOT NULL,
col_b INT NOT NULL,
col_c INT GENERATED ALWAYS AS (col_a + col_b) STORED
);
""", read='postgres')
```
## Example stacktrace
```
Traceback (most recent call last):
...
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/__init__.py", line 102, in parse
return Dialect.get_or_raise(read or dialect).parse(sql, **opts)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/dialects/dialect.py", line 919, in parse
return self.parser(**opts).parse(self.tokenize(sql), sql)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 1398, in parse
return self._parse(
^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 1467, in _parse
expressions.append(parse_method(self))
^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 1699, in _parse_statement
return self.STATEMENT_PARSERS[self._prev.token_type](self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 779, in <lambda>
TokenType.CREATE: lambda self: self._parse_create(),
^^^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 1854, in _parse_create
this = self._parse_schema(this=table_parts)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 5379, in _parse_schema
self._match_r_paren()
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 7172, in _match_r_paren
self.raise_error("Expecting )")
File "/Users/angus/code/jupyter/env/lib/python3.12/site-packages/sqlglot/parser.py", line 1511, in raise_error
raise error
sqlglot.errors.ParseError: Expecting ). Line 4, Col: 53.
ample (
col_a INT NOT NULL,
col_b INT NOT NULL,
col_c INT GENERATED ALWAYS AS (col_a + col_b) STORED
);
```
| tobymao/sqlglot | diff --git a/tests/dialects/test_postgres.py b/tests/dialects/test_postgres.py
index 8f84d9ff..66ded239 100644
--- a/tests/dialects/test_postgres.py
+++ b/tests/dialects/test_postgres.py
@@ -1047,6 +1047,7 @@ class TestPostgres(Validator):
self.validate_identity("CREATE TABLE tbl (col INT UNIQUE NULLS NOT DISTINCT DEFAULT 9.99)")
self.validate_identity("CREATE TABLE tbl (col UUID UNIQUE DEFAULT GEN_RANDOM_UUID())")
self.validate_identity("CREATE TABLE tbl (col UUID, UNIQUE NULLS NOT DISTINCT (col))")
+ self.validate_identity("CREATE TABLE tbl (col_a INT GENERATED ALWAYS AS (1 + 2) STORED)")
self.validate_identity("CREATE INDEX CONCURRENTLY ix_table_id ON tbl USING btree(id)")
self.validate_identity(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 25.32 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@3945acc4a0dfd58147de929c9a2c71734d8f1ade#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_postgres.py::TestPostgres::test_ddl"
] | [] | [
"tests/dialects/test_postgres.py::TestPostgres::test_array_length",
"tests/dialects/test_postgres.py::TestPostgres::test_array_offset",
"tests/dialects/test_postgres.py::TestPostgres::test_bool_or",
"tests/dialects/test_postgres.py::TestPostgres::test_operator",
"tests/dialects/test_postgres.py::TestPostgres::test_postgres",
"tests/dialects/test_postgres.py::TestPostgres::test_regexp_binary",
"tests/dialects/test_postgres.py::TestPostgres::test_rows_from",
"tests/dialects/test_postgres.py::TestPostgres::test_string_concat",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest",
"tests/dialects/test_postgres.py::TestPostgres::test_unnest_json_array",
"tests/dialects/test_postgres.py::TestPostgres::test_variance"
] | [] | MIT License | 20,400 | 326 | [
"sqlglot/dialects/postgres.py"
] |
|
narwhals-dev__narwhals-1515 | 1d9ac1c8adb745baa27df935fe2e41cf4e423b2a | 2024-12-05 18:30:21 | 75c3e7cae310baa522669036a77cc60f5259f420 | MarcoGorelli: thanks @thevro , looks like there's some test failures
thevro: > thanks @thevro , looks like there's some test failures
The test that fails is the one I added to confirm the exception demonstrated in the PR description. Instead of raising an AttributeError in `narwhals/_pandas_like/series.py`, as described in #1493, we now raise an AssertionError in `narwhals/series.py`. Should I change the test so that it expects the new exception (which is still a bug), or shall we try to pass the test in its current form within this PR?
Here is the reason we still get an error:
Yesterday I initially applied the same approach of `isinstance`-comparing to `numbers.Number` instead of to `int` in _both_ files, and that caused code equivalent to the test I added today to pass. However, it occurred to me that pd.Series can be indexed by other scalars such as strings (scalar != non-Iterable, sadly), so a `np.isscalar` approach is more appropriate. But `narwhals/series.py` must work without numpy installed. I see two possible fixes:
1. if `__getitem__` in `narwhals/_pandas_like/series.py` receives a scalar, the code in `narwhals/series.py` shouldn't even be called.
2. `narwhals/series.py` should also check for scalars in general instead of just `int`s. We would need to find a way to do this without using numpy.
MarcoGorelli: I think you could just make a function in `narwhals/dependencies.py` called `is_numpy_scalar` in which you just do
```
return (np := get_numpy()) is not None and np.isscalar(obj)
```
Like this, it checks if `obj` is a NumPy scalar without importing NumPy. Like this, you can safely use it in `narwhals/series.py` | diff --git a/narwhals/_pandas_like/series.py b/narwhals/_pandas_like/series.py
index 303583a9..bd799039 100644
--- a/narwhals/_pandas_like/series.py
+++ b/narwhals/_pandas_like/series.py
@@ -19,6 +19,7 @@ from narwhals._pandas_like.utils import rename
from narwhals._pandas_like.utils import select_columns_by_name
from narwhals._pandas_like.utils import set_axis
from narwhals._pandas_like.utils import to_datetime
+from narwhals.dependencies import is_numpy_scalar
from narwhals.typing import CompliantSeries
from narwhals.utils import Implementation
from narwhals.utils import import_dtypes_module
@@ -114,7 +115,7 @@ class PandasLikeSeries(CompliantSeries):
def __getitem__(self, idx: slice | Sequence[int]) -> Self: ...
def __getitem__(self, idx: int | slice | Sequence[int]) -> Any | Self:
- if isinstance(idx, int):
+ if isinstance(idx, int) or is_numpy_scalar(idx):
return self._native_series.iloc[idx]
return self._from_native_series(self._native_series.iloc[idx])
diff --git a/narwhals/dependencies.py b/narwhals/dependencies.py
index 61937f08..0c5d1172 100644
--- a/narwhals/dependencies.py
+++ b/narwhals/dependencies.py
@@ -223,6 +223,11 @@ def is_numpy_array(arr: Any) -> TypeGuard[np.ndarray]:
return (np := get_numpy()) is not None and isinstance(arr, np.ndarray)
+def is_numpy_scalar(scalar: Any) -> TypeGuard[np.generic]:
+ """Check whether `scalar` is a NumPy Scalar without importing NumPy."""
+ return (np := get_numpy()) is not None and np.isscalar(scalar)
+
+
def is_pandas_like_dataframe(df: Any) -> bool:
"""Check whether `df` is a pandas-like DataFrame without doing any imports.
diff --git a/narwhals/series.py b/narwhals/series.py
index 2797ceaf..2ef2efc7 100644
--- a/narwhals/series.py
+++ b/narwhals/series.py
@@ -11,6 +11,7 @@ from typing import Sequence
from typing import TypeVar
from typing import overload
+from narwhals.dependencies import is_numpy_scalar
from narwhals.dtypes import _validate_dtype
from narwhals.typing import IntoSeriesT
from narwhals.utils import _validate_rolling_arguments
@@ -160,7 +161,9 @@ class Series(Generic[IntoSeriesT]):
]
]
"""
- if isinstance(idx, int):
+ if isinstance(idx, int) or (
+ is_numpy_scalar(idx) and idx.dtype.kind in ("i", "u")
+ ):
return self._compliant_series[idx]
return self._from_compliant_series(self._compliant_series[idx])
| bug: can't use `Series.__getitem__` with numpy scalars
The consequence of this is:
```python
In [18]: import pandas as pd
In [19]: import narwhals as nw
In [20]: s = pd.Series([0,1,2])
In [21]: snw = nw.from_native(s, series_only=True)
In [22]: snw[snw.min()]
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[22], line 1
----> 1 snw[snw.min()]
File ~/scratch/.venv/lib/python3.12/site-packages/narwhals/series.py:72, in Series.__getitem__(self, idx)
70 if isinstance(idx, int):
71 return self._compliant_series[idx]
---> 72 return self._from_compliant_series(self._compliant_series[idx])
File ~/scratch/.venv/lib/python3.12/site-packages/narwhals/_pandas_like/series.py:128, in PandasLikeSeries.__getitem__(self, idx)
126 if isinstance(idx, int):
127 return self._native_series.iloc[idx]
--> 128 return self._from_native_series(self._native_series.iloc[idx])
File ~/scratch/.venv/lib/python3.12/site-packages/narwhals/_pandas_like/series.py:139, in PandasLikeSeries._from_native_series(self, series)
138 def _from_native_series(self, series: Any) -> Self:
--> 139 return self.__class__(
140 series,
141 implementation=self._implementation,
142 backend_version=self._backend_version,
143 dtypes=self._dtypes,
144 )
File ~/scratch/.venv/lib/python3.12/site-packages/narwhals/_pandas_like/series.py:87, in PandasLikeSeries.__init__(self, native_series, implementation, backend_version, dtypes)
79 def __init__(
80 self,
81 native_series: Any,
(...)
85 dtypes: DTypes,
86 ) -> None:
---> 87 self._name = native_series.name
88 self._native_series = native_series
89 self._implementation = implementation
AttributeError: 'numpy.int64' object has no attribute 'name'
```
The place this needs changing is in the definition of `Series.__getitem__` for `_pandas_like` | narwhals-dev/narwhals | diff --git a/tests/series_only/slice_test.py b/tests/series_only/slice_test.py
index 1eb5e0ba..c4f8dfbe 100644
--- a/tests/series_only/slice_test.py
+++ b/tests/series_only/slice_test.py
@@ -39,3 +39,9 @@ def test_slice(constructor_eager: ConstructorEager) -> None:
def test_getitem_arrow_scalar() -> None:
result = nw.from_native(pa.chunked_array([[1]]), series_only=True)[0]
assert isinstance(result, int)
+
+
+def test_index(constructor_eager: ConstructorEager) -> None:
+ df = constructor_eager({"a": [0, 1, 2]})
+ snw = nw.from_native(df, eager_only=True)["a"]
+ assert snw[snw[0]] == 0
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 3
} | 1.18 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@1d9ac1c8adb745baa27df935fe2e41cf4e423b2a#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
py4j==0.10.9.7
pyarrow==19.0.1
pyarrow-stubs==17.19
pyspark==3.5.5
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.18.3
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- py4j==0.10.9.7
- pyarrow==19.0.1
- pyarrow-stubs==17.19
- pyspark==3.5.5
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/series_only/slice_test.py::test_index[pandas_nullable_constructor]",
"tests/series_only/slice_test.py::test_index[pandas_constructor]"
] | [] | [
"tests/series_only/slice_test.py::test_index[pyarrow_table_constructor]",
"tests/series_only/slice_test.py::test_getitem_arrow_scalar",
"tests/series_only/slice_test.py::test_slice[polars_eager_constructor]",
"tests/series_only/slice_test.py::test_index[polars_eager_constructor]",
"tests/series_only/slice_test.py::test_slice[pandas_nullable_constructor]",
"tests/series_only/slice_test.py::test_index[pandas_pyarrow_constructor]",
"tests/series_only/slice_test.py::test_slice[pyarrow_table_constructor]",
"tests/series_only/slice_test.py::test_slice[pandas_pyarrow_constructor]",
"tests/series_only/slice_test.py::test_slice[pandas_constructor]"
] | [] | MIT License | 20,409 | 729 | [
"narwhals/_pandas_like/series.py",
"narwhals/dependencies.py",
"narwhals/series.py"
] |
radiocosmology__alpenhorn-268 | ffce1be47ff3c39d48ce7a3cc3370abfd9e16be7 | 2024-12-05 21:31:00 | ffce1be47ff3c39d48ce7a3cc3370abfd9e16be7 | diff --git a/alpenhorn/io/lustrehsm.py b/alpenhorn/io/lustrehsm.py
index b51ba19..5dfb504 100644
--- a/alpenhorn/io/lustrehsm.py
+++ b/alpenhorn/io/lustrehsm.py
@@ -84,6 +84,9 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
* release_check_count : integer
The number of files to check at a time when doing idle HSM status
update (see idle_update()). Default is 100.
+ * restore_wait : integer
+ The number of seconds to wait between checking if a restore request
+ has completed. Default is 600 seconds (10 minutes).
"""
remote_class = LustreHSMNodeRemote
@@ -106,8 +109,15 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
# For informational purposes. Keys are elements in self._restoring.
self._restore_start = {}
+ self._restore_wait_time = int(config.get("restore_wait", 600))
+ if self._restore_wait_time < 1:
+ raise ValueError(
+ "io_config key 'restore_wait' non-positive "
+ f"(={self._restore_wait_time})"
+ )
+
# For idle-time HSM state updates
- self._nrelease = config.get("release_check_count", 100)
+ self._nrelease = int(config.get("release_check_count", 100))
if self._nrelease < 1:
raise ValueError(
f"io_config key 'release_check_count' non-positive (={self._nrelease})"
@@ -411,7 +421,7 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
restore_wait = node_io._restore_wait(copy)
while restore_wait:
# Wait for a bit
- yield 600
+ yield self._restore_wait_time
# Now check again
restore_wait = node_io._restore_wait(copy)
@@ -581,7 +591,7 @@ class LustreHSMNodeIO(LustreQuotaNodeIO):
restore_wait = node_io._restore_wait(copy)
while restore_wait:
# Wait for a bit
- yield 600
+ yield self._restore_wait_time
# Now check again
restore_wait = node_io._restore_wait(copy)
| Parameterise HSM restore recheck-time
>The `yield 600` (which occurs in other places too) is the re-check time for things restored from HSM. That might be worth an I/O config field.
_Originally posted by @ketiltrout in https://github.com/radiocosmology/alpenhorn/issues/218#issuecomment-2471363589_ | radiocosmology/alpenhorn | diff --git a/tests/io/test_lustrehsmnode.py b/tests/io/test_lustrehsmnode.py
index e0deabe..bc4106b 100644
--- a/tests/io/test_lustrehsmnode.py
+++ b/tests/io/test_lustrehsmnode.py
@@ -59,7 +59,7 @@ def test_init_no_headroom(have_lfs, simplenode):
def test_init_bad_release_count(simplenode, have_lfs):
- """Check for bad realse_check_count."""
+ """Check for bad release_check_count."""
simplenode.io_class = "LustreHSM"
simplenode.io_config = (
@@ -70,6 +70,18 @@ def test_init_bad_release_count(simplenode, have_lfs):
UpdateableNode(None, simplenode)
+def test_init_bad_restore_wait(simplenode, have_lfs):
+ """Check for bad restore_wait."""
+
+ simplenode.io_class = "LustreHSM"
+ simplenode.io_config = (
+ '{"quota_group": "qgroup", "headroom": 300000, "restore_wait": -1}'
+ )
+
+ with pytest.raises(ValueError):
+ UpdateableNode(None, simplenode)
+
+
def test_release_files_okay(queue, node):
"""Test running release_files when we're under headroom"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.11",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/radiocosmology/alpenhorn.git@ffce1be47ff3c39d48ce7a3cc3370abfd9e16be7#egg=alpenhorn
bcrypt==4.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
chimedb @ git+https://github.com/chime-experiment/chimedb.git@d82f48eb0599393723e7ee5d756aff6c6830db32
click==8.1.8
concurrent-log-handler==0.9.25
coverage==7.8.0
cryptography==44.0.2
docker==7.1.0
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
mysql-connector-python==8.0.29
packaging==24.2
paramiko==3.5.1
peewee==3.17.9
pluggy==1.5.0
portalocker==3.1.1
protobuf==6.30.2
pycparser==2.22
pyfakefs==5.8.0
PyNaCl==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
PyYAML==6.0.2
requests==2.32.3
sshtunnel==0.4.0
tabulate==0.9.0
ujson==5.10.0
urllib3==2.3.0
watchdog==6.0.0
| name: alpenhorn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alpenhorn==2.0.0a1
- bcrypt==4.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- chimedb==24.8.0.post2+git.d82f48eb
- click==8.1.8
- concurrent-log-handler==0.9.25
- coverage==7.8.0
- cryptography==44.0.2
- docker==7.1.0
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- mysql-connector-python==8.0.29
- packaging==24.2
- paramiko==3.5.1
- peewee==3.17.9
- pluggy==1.5.0
- portalocker==3.1.1
- protobuf==6.30.2
- pycparser==2.22
- pyfakefs==5.8.0
- pynacl==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- requests==2.32.3
- sshtunnel==0.4.0
- tabulate==0.9.0
- ujson==5.10.0
- urllib3==2.3.0
- watchdog==6.0.0
prefix: /opt/conda/envs/alpenhorn
| [
"tests/io/test_lustrehsmnode.py::test_init_bad_restore_wait"
] | [
"tests/io/test_lustrehsmnode.py::test_filesize"
] | [
"tests/io/test_lustrehsmnode.py::test_init_no_headroom",
"tests/io/test_lustrehsmnode.py::test_init_bad_release_count",
"tests/io/test_lustrehsmnode.py::test_release_files_okay",
"tests/io/test_lustrehsmnode.py::test_release_files",
"tests/io/test_lustrehsmnode.py::test_before_update",
"tests/io/test_lustrehsmnode.py::test_open_binary",
"tests/io/test_lustrehsmnode.py::test_open_text",
"tests/io/test_lustrehsmnode.py::test_check_missing",
"tests/io/test_lustrehsmnode.py::test_check_ready_restored",
"tests/io/test_lustrehsmnode.py::test_check_released",
"tests/io/test_lustrehsmnode.py::test_check_ready_released",
"tests/io/test_lustrehsmnode.py::test_ready_path",
"tests/io/test_lustrehsmnode.py::test_ready_pull_restored",
"tests/io/test_lustrehsmnode.py::test_ready_pull_restoring",
"tests/io/test_lustrehsmnode.py::test_ready_pull_released",
"tests/io/test_lustrehsmnode.py::test_idle_update_empty",
"tests/io/test_lustrehsmnode.py::test_idle_update_ready",
"tests/io/test_lustrehsmnode.py::test_idle_update_not_ready",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_twice",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_timeout",
"tests/io/test_lustrehsmnode.py::test_hsm_restore_fail"
] | [] | MIT License | 20,413 | 565 | [
"alpenhorn/io/lustrehsm.py"
] |
|
Future-House__aviary-142 | dcb34e07f47f77b9cc78b65670200467e69f5e17 | 2024-12-06 01:08:41 | dcb34e07f47f77b9cc78b65670200467e69f5e17 | maykcaldas: However, `aviary_internal` uses this function in two envs | diff --git a/src/aviary/core.py b/src/aviary/core.py
index 8c3369d..587e76b 100644
--- a/src/aviary/core.py
+++ b/src/aviary/core.py
@@ -20,7 +20,6 @@ from aviary.message import EnvStateMessage, MalformedMessageError, Message, join
from aviary.render import Renderer
from aviary.tools import (
INVALID_TOOL_NAME,
- EvalAnswerMode,
FunctionInfo,
Messages,
MessagesAdapter,
@@ -35,10 +34,15 @@ from aviary.tools import (
ToolSelector,
ToolSelectorLedger,
argref_by_name,
- eval_answer,
wraps_doc_only,
)
-from aviary.utils import encode_image_to_base64, is_coroutine_callable, partial_format
+from aviary.utils import (
+ EvalAnswerMode,
+ encode_image_to_base64,
+ eval_answer,
+ is_coroutine_callable,
+ partial_format,
+)
__all__ = [
"INVALID_TOOL_NAME",
@@ -77,6 +81,7 @@ __all__ = [
"argref_by_name",
"encode_image_to_base64",
"eval_answer",
+ "eval_answer",
"fenv",
"is_coroutine_callable",
"join",
diff --git a/src/aviary/env.py b/src/aviary/env.py
index e38edb3..b8be875 100644
--- a/src/aviary/env.py
+++ b/src/aviary/env.py
@@ -7,9 +7,9 @@ import json
import logging
import random
from abc import ABC, abstractmethod
-from collections.abc import Iterator
+from collections.abc import Awaitable, Iterator
from copy import deepcopy
-from typing import Annotated, Generic, Self, TypeAlias, TypeVar, cast
+from typing import Annotated, Any, Generic, Self, TypeAlias, TypeVar, cast
from pydantic import (
BaseModel,
@@ -39,6 +39,13 @@ logger = logging.getLogger(__name__)
Serializable: TypeAlias = dict | list | int | float | str | bool | BaseModel
+async def maybe_wait_for(future: Awaitable, timeout: float | None) -> Any:
+ """Apply a timeout to an awaitable if one is provided, otherwise await indefinitely."""
+ if timeout is None:
+ return await future
+ return await asyncio.wait_for(future, timeout)
+
+
class Frame(BaseModel):
"""A frame is a snapshot at a given timestep. The name comes from video frame."""
@@ -158,6 +165,7 @@ class Environment(ABC, Generic[TEnvState]):
concurrency: bool = True,
handle_tool_exc: bool = False,
handle_invalid_tool_calls: bool = True,
+ exec_timeout: float | None = None,
**function_kwargs,
) -> list[ToolResponseMessage]:
"""
@@ -171,6 +179,8 @@ class Environment(ABC, Generic[TEnvState]):
ToolResponseMessage.
handle_invalid_tool_calls: Flag to handle invalid tool calls by returning
a ToolResponseMessage with a note that the tool requested doesn't exist
+ exec_timeout: Timeout for each tool call in seconds. If None, no timeout.
+ Note that handle_tool_exec can be used to catch TimeoutErrors.
**function_kwargs: Keyword arguments to pass to all tool functions.
Returns:
@@ -203,15 +213,21 @@ class Environment(ABC, Generic[TEnvState]):
tool_exc: Exception | None = None
try:
if is_coroutine_callable(tool._tool_fn):
- content = await tool._tool_fn(
- **tool_call.function.arguments, **filtered_kwargs
+ content = await maybe_wait_for(
+ tool._tool_fn(
+ **tool_call.function.arguments, **filtered_kwargs
+ ),
+ exec_timeout,
)
else:
# If the function is synchronous, run on a thread
- content = await asyncio.to_thread(
- tool._tool_fn,
- **tool_call.function.arguments,
- **filtered_kwargs,
+ content = await maybe_wait_for(
+ asyncio.to_thread(
+ tool._tool_fn,
+ **tool_call.function.arguments,
+ **filtered_kwargs,
+ ),
+ exec_timeout,
)
except Exception as exc:
if not handle_tool_exc:
diff --git a/src/aviary/tools/__init__.py b/src/aviary/tools/__init__.py
index c03b8cd..e18c552 100644
--- a/src/aviary/tools/__init__.py
+++ b/src/aviary/tools/__init__.py
@@ -14,11 +14,10 @@ from .base import (
ToolsAdapter,
wraps_doc_only,
)
-from .utils import EvalAnswerMode, ToolSelector, ToolSelectorLedger, eval_answer
+from .utils import ToolSelector, ToolSelectorLedger
__all__ = [
"INVALID_TOOL_NAME",
- "EvalAnswerMode",
"FunctionInfo",
"Messages",
"MessagesAdapter",
@@ -33,6 +32,5 @@ __all__ = [
"Tools",
"ToolsAdapter",
"argref_by_name",
- "eval_answer",
"wraps_doc_only",
]
diff --git a/src/aviary/tools/utils.py b/src/aviary/tools/utils.py
index e0fe363..36a4987 100644
--- a/src/aviary/tools/utils.py
+++ b/src/aviary/tools/utils.py
@@ -1,5 +1,4 @@
from collections.abc import Callable
-from enum import StrEnum
from functools import partial
from typing import TYPE_CHECKING, Any, ClassVar, cast
@@ -21,103 +20,6 @@ if TYPE_CHECKING:
from litellm import ModelResponse
-class EvalAnswerMode(StrEnum):
- EXACT = "exact" # strings must match exactly
- CONTAINS = "contains" # the correct answer is contained in the supplied answer
- LLM = "llm" # Ask an LLM to evaluate
- LLM_SCORE = "llm-score" # Ask an LLM to evaluate and return the score (normalized)
-
-
-LLM_EVAL_CONFIG = {
- "prompt": (
- "Here is a question, the correct answer to the question, and a proposed answer"
- " to the question. Please tell me if the proposed answer is correct, given the"
- " correct answer. ONLY SAY 'YES' OR 'NO'. No other output is permitted."
- "\n\nQuestion: {question}"
- "\n\nCorrect answer: {correct_answer}"
- "\n\nProposed answer: {proposed_answer}"
- ),
- "model": "gpt-4o-mini",
- "temperature": 0,
-}
-
-LLM_SCORE_EVAL_CONFIG = {
- "prompt": (
- "Here is a question, the correct answer to the question, and a rubric for"
- " evaluating the question. Judge the proposed answer based on the given rubric."
- " Give a score from 0 to 10. No other output is permitted."
- "\n\nQuestion: {question}"
- "\n\nRubric: {correct_answer}"
- "\n\nProposed answer: {proposed_answer}"
- ),
- "model": "gpt-4o-mini",
- "temperature": 0,
- "max_score": 10,
-}
-
-
-async def eval_answer(
- proposed: str,
- correct: str,
- question: str | None = None,
- eval_mode: EvalAnswerMode = EvalAnswerMode.CONTAINS,
- llm_eval_config: dict | None = None,
-) -> float:
- """Evaluate a proposed answer against a correct answer.
-
- Will return 0 or 1, except for llm-score which should be between 0 and 1
- """
- if eval_mode in {EvalAnswerMode.LLM, EvalAnswerMode.LLM_SCORE}:
- try:
- from litellm import acompletion
- except ImportError as e:
- raise ImportError(
- "eval_answer requires the 'llm' extra for 'litellm'. Please:"
- " `pip install aviary[llm]`."
- ) from e
- if question is None:
- raise ValueError("Question must be provided for LLM evaluation mode.")
- default_config = (
- LLM_EVAL_CONFIG
- if eval_mode == EvalAnswerMode.LLM
- else LLM_SCORE_EVAL_CONFIG
- )
- config = llm_eval_config or default_config
- prompt = cast(str, config.get("prompt", default_config["prompt"])).format(
- question=question,
- correct_answer=correct,
- proposed_answer=proposed,
- )
- response = await acompletion(
- model=config.get("model", default_config["model"]),
- temperature=config.get("temperature", default_config["temperature"]),
- messages=[{"content": prompt, "role": "user"}],
- )
- if eval_mode == EvalAnswerMode.LLM:
- return await eval_answer(
- response.choices[0].message.content.strip().casefold(),
- "yes",
- eval_mode=EvalAnswerMode.EXACT,
- )
- try:
- return float(response.choices[0].content.strip()) / float(
- config.get("max_score", default_config["max_score"]) # type: ignore[arg-type]
- )
- except ValueError:
- return 0
-
- gt = correct.strip().casefold()
- pred = proposed.strip().casefold()
-
- if eval_mode == EvalAnswerMode.EXACT:
- return float(pred == gt)
-
- if eval_mode == EvalAnswerMode.CONTAINS:
- return float(gt in pred)
-
- raise RuntimeError(f"Invalid evaluation mode: {eval_mode}")
-
-
class ToolSelector:
"""Simple entity to select a tool based on messages."""
diff --git a/src/aviary/utils.py b/src/aviary/utils.py
index e7d280b..4d4fb3e 100644
--- a/src/aviary/utils.py
+++ b/src/aviary/utils.py
@@ -2,12 +2,48 @@ import base64
import contextlib
import inspect
import io
-from typing import TYPE_CHECKING, Any
+from enum import StrEnum
+from typing import TYPE_CHECKING, Any, cast
if TYPE_CHECKING:
import numpy as np
+LLM_EVAL_CONFIG = {
+ "prompt": (
+ "Here is a question, the correct answer to the question, and a proposed answer"
+ " to the question. Please tell me if the proposed answer is correct, given the"
+ " correct answer. ONLY SAY 'YES' OR 'NO'. No other output is permitted."
+ "\n\nQuestion: {question}"
+ "\n\nCorrect answer: {correct_answer}"
+ "\n\nProposed answer: {proposed_answer}"
+ ),
+ "model": "gpt-4o-mini",
+ "temperature": 0,
+}
+
+LLM_SCORE_EVAL_CONFIG = {
+ "prompt": (
+ "Here is a question, the correct answer to the question, and a rubric for"
+ " evaluating the question. Judge the proposed answer based on the given rubric."
+ " Give a score from 0 to 10. No other output is permitted."
+ "\n\nQuestion: {question}"
+ "\n\nRubric: {correct_answer}"
+ "\n\nProposed answer: {proposed_answer}"
+ ),
+ "model": "gpt-4o-mini",
+ "temperature": 0,
+ "max_score": 10,
+}
+
+
+class EvalAnswerMode(StrEnum):
+ EXACT = "exact" # strings must match exactly
+ CONTAINS = "contains" # the correct answer is contained in the supplied answer
+ LLM = "llm" # Ask an LLM to evaluate
+ LLM_SCORE = "llm-score" # Ask an LLM to evaluate and return the score (normalized)
+
+
def partial_format(value: str, **formats: dict[str, Any]) -> str:
"""Partially format a string given a variable amount of formats."""
for template_key, template_value in formats.items():
@@ -41,3 +77,65 @@ def is_coroutine_callable(obj) -> bool:
if callable(obj):
return inspect.iscoroutinefunction(obj.__call__)
return False
+
+
+async def eval_answer(
+ proposed: str,
+ correct: str,
+ question: str | None = None,
+ eval_mode: EvalAnswerMode = EvalAnswerMode.CONTAINS,
+ llm_eval_config: dict | None = None,
+) -> float:
+ """Evaluate a proposed answer against a correct answer.
+
+ Will return 0 or 1, except for llm-score which should be between 0 and 1
+ """
+ if eval_mode in {EvalAnswerMode.LLM, EvalAnswerMode.LLM_SCORE}:
+ try:
+ from litellm import acompletion
+ except ImportError as e:
+ raise ImportError(
+ "eval_answer requires the 'llm' extra for 'litellm'. Please:"
+ " `pip install aviary[llm]`."
+ ) from e
+ if question is None:
+ raise ValueError("Question must be provided for LLM evaluation mode.")
+ default_config = (
+ LLM_EVAL_CONFIG
+ if eval_mode == EvalAnswerMode.LLM
+ else LLM_SCORE_EVAL_CONFIG
+ )
+ config = llm_eval_config or default_config
+ prompt = cast(str, config.get("prompt", default_config["prompt"])).format(
+ question=question,
+ correct_answer=correct,
+ proposed_answer=proposed,
+ )
+ response = await acompletion(
+ model=config.get("model", default_config["model"]),
+ temperature=config.get("temperature", default_config["temperature"]),
+ messages=[{"content": prompt, "role": "user"}],
+ )
+ if eval_mode == EvalAnswerMode.LLM:
+ return await eval_answer(
+ response.choices[0].message.content.strip().casefold(),
+ "yes",
+ eval_mode=EvalAnswerMode.EXACT,
+ )
+ try:
+ return float(response.choices[0].content.strip()) / float(
+ config.get("max_score", default_config["max_score"]) # type: ignore[arg-type]
+ )
+ except ValueError:
+ return 0
+
+ gt = correct.strip().casefold()
+ pred = proposed.strip().casefold()
+
+ if eval_mode == EvalAnswerMode.EXACT:
+ return float(pred == gt)
+
+ if eval_mode == EvalAnswerMode.CONTAINS:
+ return float(gt in pred)
+
+ raise RuntimeError(f"Invalid evaluation mode: {eval_mode}")
| Move `eval_answer` from `tools`
Should not live in `tools`, since it's more of an environment function | Future-House/aviary | diff --git a/packages/hotpotqa/tests/test_hotpotqa_env.py b/packages/hotpotqa/tests/test_hotpotqa_env.py
index 9db65c1..f6cdc8d 100644
--- a/packages/hotpotqa/tests/test_hotpotqa_env.py
+++ b/packages/hotpotqa/tests/test_hotpotqa_env.py
@@ -5,7 +5,7 @@ import pytest
from aviary.core import Environment, TaskDataset
from aviary.envs.hotpotqa import HotPotQAEnv
from aviary.envs.hotpotqa.env import HotPotQADataset
-from aviary.tools.utils import EvalAnswerMode
+from aviary.utils import EvalAnswerMode
def test_env_construction() -> None:
diff --git a/tests/test_envs.py b/tests/test_envs.py
index 4e3a4e8..4d34945 100644
--- a/tests/test_envs.py
+++ b/tests/test_envs.py
@@ -30,6 +30,7 @@ from aviary.core import (
ToolSelectorLedger,
)
from aviary.dataset_server import TaskDatasetServer
+from aviary.tools import Messages
from tests import CILLMModelNames
from tests.conftest import VCR_DEFAULT_MATCH_ON
@@ -236,6 +237,38 @@ async def test_invalid_tool_call(
assert o.tool_call_id == t.id
+class SlowEnv(Environment[None]):
+ async def reset(self) -> tuple[list[Message], list[Tool]]:
+ async def aslow_tool() -> None:
+ """I am very slow."""
+ await asyncio.sleep(0.1)
+
+ def slow_tool() -> None:
+ """I am very slow."""
+ time.sleep(0.1)
+
+ self.tools = [Tool.from_function(slow_tool), Tool.from_function(aslow_tool)]
+ return [], self.tools
+
+ async def step(
+ self, action: ToolRequestMessage
+ ) -> tuple[Messages, float, bool, bool]:
+ await self.exec_tool_calls(action, exec_timeout=0.0001)
+
+ return [], 0.0, False, False
+
+
[email protected]
+async def test_tool_exec_timeout() -> None:
+ env = SlowEnv()
+ _, tools = await env.reset()
+
+ for tool in tools:
+ action = ToolRequestMessage(tool_calls=[ToolCall.from_tool(tool)])
+ with pytest.raises(asyncio.TimeoutError):
+ await env.step(action)
+
+
class TestRendering:
class SomeState(BaseModel):
field: int
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 5
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-asyncio",
"pytest-recording",
"pytest-subtests",
"pytest-sugar",
"pytest-timer",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.13",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.9.0
astroid==3.3.9
asttokens==3.0.0
attrs==25.3.0
aviary.gsm8k==0.18.3
aviary.hotpotqa==0.18.3
beautifulsoup4==4.13.3
boto3-stubs==1.37.23
botocore-stubs==1.37.23
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
cloudpickle==3.1.1
colorama==0.4.6
datasets==3.5.0
decorator==5.2.1
dicttoxml==1.7.16
dill==0.3.8
distlib==0.3.9
distro==1.9.0
docstring_parser==0.16
execnet==2.1.1
executing==2.2.0
fastapi==0.115.12
-e git+https://github.com/Future-House/aviary.git@dcb34e07f47f77b9cc78b65670200467e69f5e17#egg=fhaviary
filelock==3.18.0
frozenlist==1.5.0
fsspec==2024.12.0
h11==0.14.0
httpcore==1.0.7
httpx==0.27.2
huggingface-hub==0.30.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipython==9.0.2
ipython_pygments_lexers==1.1.1
isort==6.0.1
jedi==0.19.2
Jinja2==3.1.6
jiter==0.9.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
litellm==1.65.0
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mccabe==0.7.0
multidict==6.2.0
multiprocess==0.70.16
mypy==1.15.0
mypy-boto3-s3==1.37.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.2.4
openai==1.69.0
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.3.250308
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
prompt_toolkit==3.0.50
propcache==0.3.1
ptyprocess==0.7.0
pure_eval==0.2.3
pyarrow==19.0.1
pydantic==2.11.1
pydantic_core==2.33.0
Pygments==2.19.1
pylint==3.3.6
pylint-plugin-utils==0.8.2
pylint-pydantic==0.3.5
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-recording==0.13.2
pytest-subtests==0.14.1
pytest-sugar==1.0.0
pytest-timer==1.0.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
refurb==2.0.0
regex==2024.11.6
requests==2.32.3
rpds-py==0.24.0
setuptools==75.8.0
six==1.17.0
sniffio==1.3.1
soupsieve==2.6
stack-data==0.6.3
starlette==0.46.1
tenacity==9.0.0
termcolor==3.0.0
tiktoken==0.9.0
tokenizers==0.21.1
tomlkit==0.13.2
tqdm==4.67.1
traitlets==5.14.3
typeguard==4.4.2
types-awscrt==0.24.2
types-Pillow==10.2.0.20240822
types-pytz==2025.2.0.20250326
types-s3transfer==0.11.4
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
uvicorn==0.34.0
vcrpy==7.0.0
virtualenv==20.29.3
wcwidth==0.2.13
wheel==0.45.1
wrapt==1.17.2
xxhash==3.5.0
yarl==1.18.3
zipp==3.21.0
| name: aviary
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- expat=2.6.4=h6a678d5_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libmpdec=4.0.0=h5eee18b_0
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py313h06a4308_0
- python=3.13.2=hf623796_100_cp313
- python_abi=3.13=0_cp313
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py313h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py313h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- annotated-types==0.7.0
- anyio==4.9.0
- astroid==3.3.9
- asttokens==3.0.0
- attrs==25.3.0
- aviary-gsm8k==0.18.3
- aviary-hotpotqa==0.18.3
- beautifulsoup4==4.13.3
- boto3-stubs==1.37.23
- botocore-stubs==1.37.23
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- cloudpickle==3.1.1
- colorama==0.4.6
- datasets==3.5.0
- decorator==5.2.1
- dicttoxml==1.7.16
- dill==0.3.8
- distlib==0.3.9
- distro==1.9.0
- docstring-parser==0.16
- execnet==2.1.1
- executing==2.2.0
- fastapi==0.115.12
- fhaviary==0.11.1.dev6+gdcb34e0
- filelock==3.18.0
- frozenlist==1.5.0
- fsspec==2024.12.0
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.27.2
- huggingface-hub==0.30.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipython==9.0.2
- ipython-pygments-lexers==1.1.1
- isort==6.0.1
- jedi==0.19.2
- jinja2==3.1.6
- jiter==0.9.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- litellm==1.65.0
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mccabe==0.7.0
- multidict==6.2.0
- multiprocess==0.70.16
- mypy==1.15.0
- mypy-boto3-s3==1.37.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.2.4
- openai==1.69.0
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.3.250308
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- prompt-toolkit==3.0.50
- propcache==0.3.1
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pyarrow==19.0.1
- pydantic==2.11.1
- pydantic-core==2.33.0
- pygments==2.19.1
- pylint==3.3.6
- pylint-plugin-utils==0.8.2
- pylint-pydantic==0.3.5
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-recording==0.13.2
- pytest-subtests==0.14.1
- pytest-sugar==1.0.0
- pytest-timer==1.0.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- refurb==2.0.0
- regex==2024.11.6
- requests==2.32.3
- rpds-py==0.24.0
- six==1.17.0
- sniffio==1.3.1
- soupsieve==2.6
- stack-data==0.6.3
- starlette==0.46.1
- tenacity==9.0.0
- termcolor==3.0.0
- tiktoken==0.9.0
- tokenizers==0.21.1
- tomlkit==0.13.2
- tqdm==4.67.1
- traitlets==5.14.3
- typeguard==4.4.2
- types-awscrt==0.24.2
- types-pillow==10.2.0.20240822
- types-pytz==2025.2.0.20250326
- types-s3transfer==0.11.4
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- uvicorn==0.34.0
- vcrpy==7.0.0
- virtualenv==20.29.3
- wcwidth==0.2.13
- wrapt==1.17.2
- xxhash==3.5.0
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/aviary
| [
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_dataset_from_name",
"tests/test_envs.py::TestDummyEnv::test_dummyenv",
"tests/test_envs.py::TestDummyEnv::test_tool_signatures",
"tests/test_envs.py::TestDummyEnv::test_loading_from_name",
"tests/test_envs.py::TestDummyEnv::test_tool_calling[gpt-4o-mini-2024-07-18]",
"tests/test_envs.py::TestDummyEnv::test_tool_calling[claude-3-haiku-20240307]",
"tests/test_envs.py::test_multiple_calls",
"tests/test_envs.py::test_invalid_tool_call[False]",
"tests/test_envs.py::test_invalid_tool_call[True]",
"tests/test_envs.py::test_tool_exec_timeout",
"tests/test_envs.py::TestRendering::test_serialization[5-5]",
"tests/test_envs.py::TestRendering::test_serialization[5.6-5.6]",
"tests/test_envs.py::TestRendering::test_serialization[hi-hi]",
"tests/test_envs.py::TestRendering::test_serialization[True-True]",
"tests/test_envs.py::TestRendering::test_serialization[state4-serialized4]",
"tests/test_envs.py::TestRendering::test_serialization[state5-serialized5]",
"tests/test_envs.py::TestRendering::test_serialization[state6-serialized6]",
"tests/test_envs.py::TestRendering::test_frame_mutability",
"tests/test_envs.py::TestRendering::test_rendering",
"tests/test_envs.py::TestParallelism::test_exec_tool_calls_handling[claude-3-haiku-20240307]",
"tests/test_envs.py::TestParallelism::test_exec_tool_calls_handling[gpt-4-turbo]",
"tests/test_envs.py::TestParallelism::test_tool_selector_from_model_name[gpt-4o-mini-2024-07-18]",
"tests/test_envs.py::TestTaskDatasetServer::test_start",
"tests/test_envs.py::TestTaskDatasetServer::test_reset_and_step",
"tests/test_envs.py::TestTaskDatasetServer::test_close",
"tests/test_envs.py::TestTaskDatasetServer::test_close_old_envs",
"tests/test_envs.py::TestTaskDatasetServer::test_info"
] | [
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_env_construction",
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_tool_results",
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_answer_evaluation_mode[exact]",
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_answer_evaluation_mode[contains]",
"packages/hotpotqa/tests/test_hotpotqa_env.py::test_answer_evaluation_mode[llm]",
"tests/test_envs.py::TestParallelism::test_tool_selector_with_external_acompletion[gpt-4o-mini-2024-07-18]"
] | [] | [] | Apache License 2.0 | 20,417 | 3,402 | [
"src/aviary/core.py",
"src/aviary/env.py",
"src/aviary/tools/__init__.py",
"src/aviary/tools/utils.py",
"src/aviary/utils.py"
] |
nilearn__nilearn-4903 | c4258e942754e6630ad89891214cc9903e3e203b | 2024-12-06 10:55:39 | d979bacad0dfd0600e35a23f9f08d3df7907e489 | diff --git a/doc/conf.py b/doc/conf.py
index 736060512..1e4463b61 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -183,6 +183,7 @@ linkcheck_ignore = [
"https://pages.saclay.inria.fr/bertrand.thirion/",
"https://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm",
"http://brainomics.cea.fr/localizer/",
+ "https://figshare.com/articles/dataset/Group_multiscale_functional_template_generated_with_BASC_on_the_Cambridge_sample/1285615",
# those are needed because figure cannot take sphinx gallery reference
# as target
r"../auto_examples/.*html",
diff --git a/nilearn/_utils/data_gen.py b/nilearn/_utils/data_gen.py
index 4d69947b0..76e148a7d 100644
--- a/nilearn/_utils/data_gen.py
+++ b/nilearn/_utils/data_gen.py
@@ -515,8 +515,8 @@ def write_fake_fmri_data_and_design(
fmri_files.append(str(file_path / f"fmri_run{i:d}.nii"))
fmri.to_filename(fmri_files[-1])
for i, design in enumerate(design_matrices):
- design_files.append(str(file_path / f"dmtx_{i:d}.csv"))
- design.to_csv(design_files[-1])
+ design_files.append(str(file_path / f"dmtx_{i:d}.tsv"))
+ design.to_csv(design_files[-1], sep="\t", index=False)
return mask_file, fmri_files, design_files
diff --git a/nilearn/_utils/glm.py b/nilearn/_utils/glm.py
index 22d60f292..5e61fb3f8 100644
--- a/nilearn/_utils/glm.py
+++ b/nilearn/_utils/glm.py
@@ -1,3 +1,5 @@
+from pathlib import Path
+
import numpy as np
import pandas as pd
@@ -80,14 +82,13 @@ def _read_events_table(table_path):
ValueError
If file loading fails.
"""
- try:
- # kept for historical reasons, a lot of tests use csv with index column
- loaded = pd.read_csv(table_path, index_col=0)
- except: # noqa: E722
- raise ValueError(f"table path {table_path} could not be loaded")
- if loaded.empty:
- try:
- loaded = pd.read_csv(table_path, sep="\t")
- except: # noqa: E722
- raise ValueError(f"table path {table_path} could not be loaded")
+ table_path = Path(table_path)
+ if table_path.suffix == ".tsv":
+ loaded = pd.read_csv(table_path, sep="\t")
+ elif table_path.suffix == ".csv":
+ loaded = pd.read_csv(table_path)
+ else:
+ raise ValueError(
+ f"Tables to load can only be TSV or CSV.\nGot {table_path}"
+ )
return loaded
| :rotating_light: NIGHTLY DEPENDENCIES TEST: failure on `refs/heads/main`
The run `12199328751` of the workflow testing Nilearn with the nightly build of its dependencies failed on `refs/heads/main`.
You can view the report here: https://github.com/nilearn/nilearn/actions/runs/12199328751
Pinging @nilearn/core
| nilearn/nilearn | diff --git a/nilearn/_utils/tests/test_data_gen.py b/nilearn/_utils/tests/test_data_gen.py
index 58989692d..230a0092a 100644
--- a/nilearn/_utils/tests/test_data_gen.py
+++ b/nilearn/_utils/tests/test_data_gen.py
@@ -583,12 +583,12 @@ def test_generate_fake_fmri_error(rng):
@pytest.mark.parametrize(
"shapes", [[(2, 3, 5, 7)], [(5, 5, 5, 3), (5, 5, 5, 5)]]
)
[email protected]("rk", [1, 3])
[email protected]("rank", [1, 3, 5])
@pytest.mark.parametrize("affine", [None, np.diag([0.5, 0.3, 1, 1])])
-def test_fake_fmri_data_and_design(tmp_path, shapes, rk, affine):
+def test_fake_fmri_data_and_design_generate(shapes, rank, affine):
# test generate
mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
- shapes, rk=rk, affine=affine, random_state=42
+ shapes, rk=rank, affine=affine, random_state=42
)
for fmri, shape in zip(fmri_data, shapes):
@@ -598,11 +598,20 @@ def test_fake_fmri_data_and_design(tmp_path, shapes, rk, affine):
assert_almost_equal(fmri.affine, affine)
for design, shape in zip(design_matrices, shapes):
- assert design.shape == (shape[3], rk)
+ assert design.shape == (shape[3], rank)
- # test write
+
[email protected](
+ "shapes", [[(2, 3, 5, 7)], [(5, 5, 5, 3), (5, 5, 5, 5)]]
+)
[email protected]("rank", [1, 3, 5])
[email protected]("affine", [None, np.diag([0.5, 0.3, 1, 1])])
+def test_fake_fmri_data_and_design_write(tmp_path, shapes, rank, affine):
+ mask, fmri_data, design_matrices = generate_fake_fmri_data_and_design(
+ shapes, rk=rank, affine=affine, random_state=42
+ )
mask_file, fmri_files, design_files = write_fake_fmri_data_and_design(
- shapes, rk=rk, affine=affine, random_state=42, file_path=tmp_path
+ shapes, rk=rank, affine=affine, random_state=42, file_path=tmp_path
)
mask_img = load(mask_file)
@@ -616,7 +625,7 @@ def test_fake_fmri_data_and_design(tmp_path, shapes, rk, affine):
for design_file, design in zip(design_files, design_matrices):
assert_frame_equal(
- pd.read_csv(design_file, index_col=0), design, check_exact=False
+ pd.read_csv(design_file, sep="\t"), design, check_exact=False
)
diff --git a/nilearn/_utils/tests/test_glm.py b/nilearn/_utils/tests/test_glm.py
index 9d25f0028..3bbde9b9d 100644
--- a/nilearn/_utils/tests/test_glm.py
+++ b/nilearn/_utils/tests/test_glm.py
@@ -6,12 +6,16 @@ from nilearn._utils.glm import check_and_load_tables
def test_img_table_checks():
# check tables type and that can be loaded
- with pytest.raises(ValueError, match="table path .* could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
check_and_load_tables([".csv", ".csv"], "")
with pytest.raises(
TypeError,
match="can only be a pandas DataFrame, a Path object or a string",
):
check_and_load_tables([[], pd.DataFrame()], "")
- with pytest.raises(ValueError, match="table path .* could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
check_and_load_tables([".csv", pd.DataFrame()], "")
diff --git a/nilearn/glm/tests/test_first_level.py b/nilearn/glm/tests/test_first_level.py
index f3400b086..0453bb27b 100644
--- a/nilearn/glm/tests/test_first_level.py
+++ b/nilearn/glm/tests/test_first_level.py
@@ -333,7 +333,7 @@ def test_fmri_inputs_type_design_matrices_smoke(tmp_path, shape_4d_default):
)
FirstLevelModel(mask_img=mask).fit(func_img[0], design_matrices=des[0])
FirstLevelModel(mask_img=mask).fit(
- func_img[0], design_matrices=[pd.read_csv(des[0])]
+ func_img[0], design_matrices=[pd.read_csv(des[0], sep="\t")]
)
FirstLevelModel(mask_img=mask).fit(
func_img[0], design_matrices=[Path(des[0])]
@@ -636,9 +636,8 @@ def test_fmri_inputs_design_matrices_tsv(tmp_path, shape_4d_default):
shapes=[shape_4d_default], file_path=tmp_path
)
func_img = func_img[0]
- des = des[0]
- pd.read_csv(des).to_csv(des, sep="\t", index=False)
-
+ des = Path(des[0])
+ pd.read_csv(des, sep="\t").to_csv(des.with_suffix(".csv"), index=False)
FirstLevelModel(mask_img=mask).fit([func_img], design_matrices=des)
@@ -2135,14 +2134,18 @@ def test_check_run_tables_errors():
# check high level wrapper keeps behavior
with pytest.raises(ValueError, match="len.* does not match len.*"):
_check_run_tables([""] * 2, [""], "")
- with pytest.raises(ValueError, match="table path .* could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
_check_run_tables([""] * 2, [".csv", ".csv"], "")
with pytest.raises(
TypeError,
match="can only be a pandas DataFrame, a Path object or a string",
):
_check_run_tables([""] * 2, [[0], pd.DataFrame()], "")
- with pytest.raises(ValueError, match="table path .* could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
_check_run_tables([""] * 2, [".csv", pd.DataFrame()], "")
diff --git a/nilearn/glm/tests/test_second_level.py b/nilearn/glm/tests/test_second_level.py
index 5486e2842..18a07df75 100644
--- a/nilearn/glm/tests/test_second_level.py
+++ b/nilearn/glm/tests/test_second_level.py
@@ -839,7 +839,9 @@ def test_secondlevelmodel_design_matrix_path(img_3d_mni, tmp_path):
@pytest.mark.parametrize("design_matrix", ["foo", Path("foo")])
def test_secondlevelmodel_design_matrix_error_path(img_3d_mni, design_matrix):
second_level_input = [img_3d_mni, img_3d_mni, img_3d_mni]
- with pytest.raises(ValueError, match="table path foo could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
SecondLevelModel().fit(
second_level_input=second_level_input, design_matrix=design_matrix
)
diff --git a/nilearn/maskers/tests/test_base_masker.py b/nilearn/maskers/tests/test_base_masker.py
index 56c3c9c83..5c9a71d28 100644
--- a/nilearn/maskers/tests/test_base_masker.py
+++ b/nilearn/maskers/tests/test_base_masker.py
@@ -31,6 +31,9 @@ extra_valid_checks = [
if compare_version(sklearn_version, "<", "1.5.0"):
extra_valid_checks.append("check_estimator_sparse_data")
+if compare_version(sklearn_version, ">", "1.6"):
+ extra_valid_checks.append("check_positive_only_tag_during_fit")
+
@pytest.mark.parametrize(
"estimator, check, name",
diff --git a/nilearn/plotting/tests/test_matrix_plotting.py b/nilearn/plotting/tests/test_matrix_plotting.py
index 1e8ca4135..af611373d 100644
--- a/nilearn/plotting/tests/test_matrix_plotting.py
+++ b/nilearn/plotting/tests/test_matrix_plotting.py
@@ -381,7 +381,9 @@ def test_plot_design_matrix_correlation_smoke_path(tmp_path):
def test_plot_design_matrix_correlation_errors(mat):
- with pytest.raises(ValueError, match="table path foo could not be loaded"):
+ with pytest.raises(
+ ValueError, match="Tables to load can only be TSV or CSV."
+ ):
plot_design_matrix_correlation("foo")
with pytest.raises(ValueError, match="dataframe cannot be empty."):
diff --git a/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py b/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
index 79b3ba7a0..921a4dfbc 100644
--- a/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
+++ b/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
@@ -50,6 +50,9 @@ if compare_version(sklearn_version, ">", "1.5.2"):
if compare_version(sklearn_version, "<", "1.5.0"):
extra_valid_checks.append("check_estimator_sparse_data")
+if compare_version(sklearn_version, ">", "1.6"):
+ extra_valid_checks.append("check_positive_only_tag_during_fit")
+
@pytest.mark.parametrize(
"estimator, check, name",
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 3
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
ansi2html==1.9.2
babel==2.17.0
beautifulsoup4==4.13.3
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
fonttools==4.56.0
furo==2024.8.6
htmlmin2==0.1.13
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
joblib==1.4.2
kaleido==0.2.1
kiwisolver==1.4.7
latexcodec==3.0.0
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdit-py-plugins==0.4.2
mdurl==0.1.2
memory-profiler==0.61.0
myst-parser==3.0.1
narwhals==1.32.0
nibabel==5.3.2
-e git+https://github.com/nilearn/nilearn.git@c4258e942754e6630ad89891214cc9903e3e203b#egg=nilearn
numpy==2.0.2
numpydoc==1.8.0
packaging==24.2
pandas==2.2.3
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy==1.5.0
psutil==7.0.0
pybtex==0.24.0
pybtex-docutils==1.0.3
Pygments==2.19.1
pyparsing==3.2.3
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-randomly==3.16.0
pytest-reporter==0.5.3
pytest-reporter-html1==0.9.2
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-basic-ng==1.0.0b2
sphinx-copybutton==0.5.2
sphinx-gallery==0.19.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-bibtex==2.6.3
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sphinxext-opengraph==0.9.1
tabulate==0.9.0
threadpoolctl==3.6.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: nilearn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- ansi2html==1.9.2
- babel==2.17.0
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- fonttools==4.56.0
- furo==2024.8.6
- htmlmin2==0.1.13
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- joblib==1.4.2
- kaleido==0.2.1
- kiwisolver==1.4.7
- latexcodec==3.0.0
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- memory-profiler==0.61.0
- myst-parser==3.0.1
- narwhals==1.32.0
- nibabel==5.3.2
- nilearn==0.11.1.dev35+gc4258e942
- numpy==2.0.2
- numpydoc==1.8.0
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- pluggy==1.5.0
- psutil==7.0.0
- pybtex==0.24.0
- pybtex-docutils==1.0.3
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-randomly==3.16.0
- pytest-reporter==0.5.3
- pytest-reporter-html1==0.9.2
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-basic-ng==1.0.0b2
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-gallery==0.19.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-bibtex==2.6.3
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sphinxext-opengraph==0.9.1
- tabulate==0.9.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/nilearn
| [
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_type_design_matrices_smoke",
"nilearn/glm/tests/test_first_level.py::test_check_run_tables_errors",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_design_matrices_tsv",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_design_matrix_error_path[foo]",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_design_matrix_error_path[design_matrix1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-5-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-3-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-1-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-3-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-1-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-5-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-5-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-5-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-1-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-3-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[None-3-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_write[affine1-1-shapes0]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation_errors",
"nilearn/_utils/tests/test_glm.py::test_img_table_checks"
] | [
"nilearn/glm/tests/test_first_level.py::test_check_estimator_invalid[estimator15-check15-check_estimator_sparse_tag]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator_invalid[estimator3-check3-check_estimator_sparse_tag]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator_invalid[estimator14-check14-check_estimator_sparse_tag]"
] | [
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator14-check14-check_transformers_unfitted]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator10-check10-check_mixin_order]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator17-check17-check_fit2d_1sample]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator1-check1-check_estimator_cloneable]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator15-check15-check_transformer_n_iter]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator20-check20-check_set_params]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator19-check19-check_get_params_invariance]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator18-check18-check_fit2d_1feature]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator11-check11-check_positive_only_tag_during_fit]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator2-check2-check_estimator_tags_renamed]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator16-check16-check_parameters_default_constructible]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator6-check6-check_estimators_overwrite_params]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator12-check12-check_estimator_sparse_array]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator5-check5-check_no_attributes_set_in_init]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator3-check3-check_valid_tag_types]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator9-check9-check_do_not_raise_errors_in_init_or_set_params]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator13-check13-check_estimator_sparse_matrix]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator7-check7-check_dont_overwrite_parameters]",
"nilearn/maskers/tests/test_base_masker.py::test_cropping_code_paths",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator8-check8-check_estimators_unfitted]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator4-check4-check_estimator_repr]",
"nilearn/maskers/tests/test_base_masker.py::test_check_estimator[estimator0-check0-check_estimator_cloneable]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-1-0-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_missing_trial_type_column_warning",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_img_filter[img_filters2-ValueError-bids",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_select_one_run_per_session",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[acq]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_slice_time_ref[0.0]",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator2-check2-check_estimator_tags_renamed]",
"nilearn/glm/tests/test_first_level.py::test_flm_fit_surface_image_one_hemisphere",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-1-2-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_tr",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_img_filter[img_filters1-TypeError-Filters",
"nilearn/glm/tests/test_first_level.py::test_warn_flm_smooth_surface_image",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_task_label[$$$-ValueError]",
"nilearn/glm/tests/test_first_level.py::test_get_voxelwise_attributes_should_return_as_many_as_design_matrices[shapes0]",
"nilearn/glm/tests/test_first_level.py::test_glm_ar_estimates[ar_vals2]",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_with_data_with_mask",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[True-glover]",
"nilearn/glm/tests/test_first_level.py::test_first_level_residuals_errors",
"nilearn/glm/tests/test_first_level.py::test_img_table_checks",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_events_type",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_img_filter[img_filters3-ValueError-is",
"nilearn/glm/tests/test_first_level.py::test_first_level_with_no_signal_scaling",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_slice_time_ref[None]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_unused_kwargs",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-0-2-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_space_none",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_with_subject_labels",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-1-0-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-1-2-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[False-<lambda>]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_repetition_time_errors[not",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[res]",
"nilearn/glm/tests/test_first_level.py::test_flm_with_surface_image_with_surface_masker",
"nilearn/glm/tests/test_first_level.py::test_list_valid_subjects_with_toplevel_files",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_bold_file",
"nilearn/glm/tests/test_first_level.py::test_flm_with_surface_masker_with_mask",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[dir]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[False-glover]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_space_label[42-TypeError]",
"nilearn/glm/tests/test_first_level.py::test_first_level_design_creation",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_derivatives",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_deprecated_slice_time_default",
"nilearn/glm/tests/test_first_level.py::test_explicit_fixed_effects",
"nilearn/glm/tests/test_first_level.py::test_flm_with_surface_data_no_design_matrix",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_with_missing_events",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-0-0-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[True-spm",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_mismatch_run_index",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_different_design_matrices",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_one_run",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[ce]",
"nilearn/glm/tests/test_first_level.py::test_first_level_predictions_r_square",
"nilearn/glm/tests/test_first_level.py::test_run_glm_ols",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_subject_order_with_labels",
"nilearn/glm/tests/test_first_level.py::test_run_glm_ar3",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator6-check6-check_mixin_order]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_session",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-1-2-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_get_repetition_time_from_derivatives",
"nilearn/glm/tests/test_first_level.py::test_error_flm_volume_mask_surface_image",
"nilearn/glm/tests/test_first_level.py::test_glm_ar_estimates[ar_vals0]",
"nilearn/glm/tests/test_first_level.py::test_glm_random_state[3]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_slice_timing_ref_warnings",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_with_data",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_slice_timing_ref_errors[not",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_get_start_time_from_derivatives",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_slice_timing_ref_errors[2-ValueError-between",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_load_confounds_warnings",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-0-2-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_flm_compute_contrast_with_surface_data",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator1-check1-check_estimator_cloneable]",
"nilearn/glm/tests/test_first_level.py::test_run_glm_errors",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[False-spm",
"nilearn/glm/tests/test_first_level.py::test_first_level_glm_computation_with_memory_caching",
"nilearn/glm/tests/test_first_level.py::test_flm_fit_surface_image_with_mask",
"nilearn/glm/tests/test_first_level.py::test_check_trial_type_warning",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator0-check0-check_estimator_cloneable]",
"nilearn/glm/tests/test_first_level.py::test_glm_random_state[random_state1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_with_scaling",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_slice_time_ref[1.0]",
"nilearn/glm/tests/test_first_level.py::test_error_flm_surface_mask_volume_image",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator3-check3-check_valid_tag_types]",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator5-check5-check_estimators_unfitted]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[False-spm]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_sub_labels[sub_labels2-TypeError-must",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_smoke_test_for_verbose_argument[0]",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_with_paths",
"nilearn/glm/tests/test_first_level.py::test_glm_ar_estimates_errors",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_sub_labels[42-TypeError-must",
"nilearn/glm/tests/test_first_level.py::test_flm_get_voxelwise_model_attribute_with_surface_data",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-0-0-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-0-0-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_one_confound_missing",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator10-check10-check_set_params]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-0-2-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[rec]",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_shape",
"nilearn/glm/tests/test_first_level.py::test_explicit_fixed_effects_without_mask",
"nilearn/glm/tests/test_first_level.py::test_get_voxelwise_attributes_should_return_as_many_as_design_matrices[shapes1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_repetition_time_errors[-1-ValueError-positive]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_duplicate_sub_labels",
"nilearn/glm/tests/test_first_level.py::test_first_level_contrast_computation_errors",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_smoke_test_for_verbose_argument[1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[T1w-0-0-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_glm_ar_estimates[ar_vals1]",
"nilearn/glm/tests/test_first_level.py::test_flm_fit_surface_image_default_mask_img",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_space_label[$$$-ValueError]",
"nilearn/glm/tests/test_first_level.py::test_run_glm_ar1",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_select_all_runs_of_one_session",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_surface",
"nilearn/glm/tests/test_first_level.py::test_first_level_residuals",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-1-0-n_runs1]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[True-<lambda>]",
"nilearn/glm/tests/test_first_level.py::test_first_level_contrast_computation",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_no_subject",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator7-check7-check_estimator_sparse_array]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_load_confounds",
"nilearn/glm/tests/test_first_level.py::test_compute_contrast_num_contrasts",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_different_design_matrices_formulas",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_errors_confounds",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator8-check8-check_estimator_sparse_matrix]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-1-0-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_task_label[42-TypeError]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_too_many_bold_files",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_all_confounds_missing",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_input_dataset_path",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_img_filter[foo-TypeError-'img_filters'",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[echo]",
"nilearn/glm/tests/test_first_level.py::test_slice_time_ref_warning_only_when_not_provided",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-0-2-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_slice_time_ref[0.5]",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_errors",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_with_one_events_missing",
"nilearn/glm/tests/test_first_level.py::test_fixed_effect_contrast_surface",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_validation_sub_labels[sub_labels1-TypeError-must",
"nilearn/glm/tests/test_first_level.py::test_high_level_glm_null_contrasts",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_subject_order",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_get_metadata_from_derivatives",
"nilearn/glm/tests/test_first_level.py::test_glm_sample_mask",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_several_labels_per_entity[den]",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator4-check4-check_estimator_repr]",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_with_confounds",
"nilearn/glm/tests/test_first_level.py::test_first_level_glm_computation",
"nilearn/glm/tests/test_first_level.py::test_scaling",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids[MNI-1-2-n_runs0]",
"nilearn/glm/tests/test_first_level.py::test_first_level_hrf_model[True-spm]",
"nilearn/glm/tests/test_first_level.py::test_flm_fit_surface_image",
"nilearn/glm/tests/test_first_level.py::test_fmri_inputs_type_data_smoke",
"nilearn/glm/tests/test_first_level.py::test_check_estimator[estimator9-check9-check_get_params_invariance]",
"nilearn/glm/tests/test_first_level.py::test_first_level_from_bids_set_repetition_time_warnings",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator20-check20-check_estimator_sparse_matrix]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator26-check26-check_clusterer_compute_labels_predict]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[4]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator22-check22-check_estimators_pickle]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator23-check23-check_f_contiguous_array_estimator]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator31-check31-check_fit2d_1sample]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator32-check32-check_fit2d_1feature]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator25-check25-check_transformer_n_iter]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator30-check30-check_parameters_default_constructible]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator8-check8-check_estimators_fit_returns_self]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator21-check21-check_estimators_pickle]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator2-check2-check_estimator_tags_renamed]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator29-check29-check_estimators_partial_fit_n_features]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list1-9]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator17-check17-check_estimators_empty_data_messages]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator10-check10-check_estimators_unfitted]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[2]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator0-check0-check_estimator_cloneable]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator16-check16-check_dtype_object]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator19-check19-check_estimator_sparse_array]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator13-check13-check_positive_only_tag_during_fit]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list3-11]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator11-check11-check_do_not_raise_errors_in_init_or_set_params]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator27-check27-check_clustering]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator28-check28-check_clustering]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator14-check14-check_estimators_dtypes]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator34-check34-check_set_params]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator4-check4-check_estimator_repr]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator15-check15-check_complex_data]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator37-check37-check_fit1d]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator24-check24-check_transformers_unfitted]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator9-check9-check_readonly_memmap_input]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator6-check6-check_fit_score_takes_y]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator1-check1-check_estimator_cloneable]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list0-5]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list2-10]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator12-check12-check_mixin_order]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator7-check7-check_dont_overwrite_parameters]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator5-check5-check_no_attributes_set_in_init]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[5]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator18-check18-check_pipeline_consistency]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator3-check3-check_valid_tag_types]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator35-check35-check_dict_unchanged]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator33-check33-check_get_params_invariance]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator36-check36-check_fit_check_is_fitted]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator1-check1-check_estimator_cloneable]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator3-check3-check_valid_tag_types]",
"nilearn/glm/tests/test_second_level.py::test_second_level_surface_image_contrast_computation",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator2-check2-check_estimator_tags_renamed]",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_3d_images",
"nilearn/glm/tests/test_second_level.py::test_second_level_glm_computation",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute[residuals]",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_computation",
"nilearn/glm/tests/test_second_level.py::test_check_n_rows_desmat_vs_n_effect_maps",
"nilearn/glm/tests/test_second_level.py::test_fmri_inputs_dataframes_as_input",
"nilearn/glm/tests/test_second_level.py::test_high_level_non_parametric_inference_with_paths",
"nilearn/glm/tests/test_second_level.py::test_second_level_contrast_computation_errors",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_image",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator6-check6-check_do_not_raise_errors_in_init_or_set_params]",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute_errors[residuals]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator9-check9-check_estimator_sparse_matrix]",
"nilearn/glm/tests/test_second_level.py::test_fmri_inputs_pandas_errors",
"nilearn/glm/tests/test_second_level.py::test_high_level_glm_with_paths",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_image_3d",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_formula[second_level_contrast0]",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute_errors[predicted]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator10-check10-check_parameters_default_constructible]",
"nilearn/glm/tests/test_second_level.py::test_second_level_t_contrast_length_errors",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_fit_inputs_errors",
"nilearn/glm/tests/test_second_level.py::test_second_level_f_contrast_length_errors",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input_confounds",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_computation_errors",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_tfce",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_permutation_computation",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_image_with_mask",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator0-check0-check_estimator_cloneable]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator7-check7-check_mixin_order]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator4-check4-check_estimator_repr]",
"nilearn/glm/tests/test_second_level.py::test_high_level_non_parametric_inference_with_paths_warning",
"nilearn/glm/tests/test_second_level.py::test_infer_effect_maps",
"nilearn/glm/tests/test_second_level.py::test_check_first_level_contrast",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_formula[second_level_contrast3]",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_design_matrix_path",
"nilearn/glm/tests/test_second_level.py::test_second_level_contrast_computation_with_memory_caching",
"nilearn/glm/tests/test_second_level.py::test_infer_effect_maps_error",
"nilearn/glm/tests/test_second_level.py::test_sort_input_dataframe",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input_list_wrong_type",
"nilearn/glm/tests/test_second_level.py::test_fmri_inputs_for_non_parametric_inference_errors",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_formula[r1]",
"nilearn/glm/tests/test_second_level.py::test_fmri_pandas_series_as_input",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute_errors[r_square]",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute[predicted]",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_cluster_level",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input",
"nilearn/glm/tests/test_second_level.py::test_check_output_type",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input_dataframe",
"nilearn/glm/tests/test_second_level.py::test_check_confounds",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_image_warning_smoothing",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input_unfit_model",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_with_flm_objects",
"nilearn/glm/tests/test_second_level.py::test_second_level_voxelwise_attribute[r_square]",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_cluster_level_with_single_covariates",
"nilearn/glm/tests/test_second_level.py::test_second_level_contrast_computation",
"nilearn/glm/tests/test_second_level.py::test_process_second_level_input_as_firstlevelmodels",
"nilearn/glm/tests/test_second_level.py::test_second_lvl_dataframe_computation",
"nilearn/glm/tests/test_second_level.py::test_check_second_level_input_design_matrix",
"nilearn/glm/tests/test_second_level.py::test_fmri_img_inputs_errors",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator12-check12-check_set_params]",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator11-check11-check_get_params_invariance]",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_image_3d_same_as_list_2d",
"nilearn/glm/tests/test_second_level.py::test_second_level_residuals",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_error_surface_image_2d",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_contrast_formula[r1-r2]",
"nilearn/glm/tests/test_second_level.py::test_non_parametric_inference_cluster_level_with_covariates",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator5-check5-check_estimators_unfitted]",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_flm_of_surface_image",
"nilearn/glm/tests/test_second_level.py::test_process_second_level_input_as_dataframe",
"nilearn/glm/tests/test_second_level.py::test_check_estimator[estimator8-check8-check_estimator_sparse_array]",
"nilearn/glm/tests/test_second_level.py::test_check_affine_first_level_models",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_design_matrix_error_type[1]",
"nilearn/glm/tests/test_second_level.py::test_fmri_inputs",
"nilearn/glm/tests/test_second_level.py::test_secondlevelmodel_design_matrix_error_type[design_matrix1]",
"nilearn/glm/tests/test_second_level.py::test_check_shape_first_level_models",
"nilearn/glm/tests/test_second_level.py::test_slm_4d_image",
"nilearn/glm/tests/test_second_level.py::test_high_level_glm_with_paths_errors",
"nilearn/glm/tests/test_second_level.py::test_second_level_input_as_surface_no_design_matrix",
"nilearn/_utils/tests/test_data_gen.py::test_write_fake_bold_img[affine1-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_add_metadata_to_bids_derivatives_with_json_path",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-1-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_errors",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-noise-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_maps",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks2-n_runs2-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-noise-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range1-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_labeled_regions",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[1-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range0-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-3-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-noise-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_extra_raw_entity",
"nilearn/_utils/tests/test_data_gen.py::test_basic_paradigm[True]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-step-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-step-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_regions_ts_with_overlap[boxcar]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-noise-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-noise-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks1-n_runs1-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_write_fake_bold_img[None-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range0-9-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[2-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks1-n_runs1-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-noise-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_timeseries[9-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range0-9-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks2-n_runs2-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-5-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-1-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range1-30-9]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-1-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks2-n_runs2-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks2-n_runs2-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-noise-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-step-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_timeseries[1-9]",
"nilearn/_utils/tests/test_data_gen.py::test_bids_dataset_no_session",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-noise-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-step-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_regions_ts_no_overlap[boxcar]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_timeseries[9-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[1-30-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_regions_ts_with_overlap[hamming]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[1-1-5]",
"nilearn/_utils/tests/test_data_gen.py::test_add_metadata_to_bids_derivatives_default_path",
"nilearn/_utils/tests/test_data_gen.py::test_generate_timeseries[1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_random_img[None-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-step-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_create_fake_bids_dataset_no_confounds[None-True]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-step-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks0-n_runs0-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks2-n_runs2-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-noise-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-step-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range0-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks0-n_runs0-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks0-n_runs0-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-step-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-step-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_extra_derivative_entity",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[2-30-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-5-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks0-n_runs0-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-noise-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-noise-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-noise-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range0-30-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range0-9-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks1-n_runs1-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks2-n_runs2-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-5-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-step-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks2-n_runs2-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks1-n_runs1-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri_error",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-noise-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_extra_entity_not_bids_entity",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range1-9-5]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_random_img[affine1-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_create_fake_bids_dataset_no_confounds[_timeseries-False]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_random_img[None-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-step-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks2-n_runs2-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_regions_ts_no_overlap[hamming]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[2-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks0-n_runs0-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-1-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks0-n_runs0-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks0-n_runs0-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range1-30-5]",
"nilearn/_utils/tests/test_data_gen.py::test_create_fake_bids_dataset_no_derivatives",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks1-n_runs1-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks1-n_runs1-2-1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[affine1-3-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range1-30-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-3-classification-step-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range0-30-9]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks0-n_runs0-2-2]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-5-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_write_fake_bold_img[affine1-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range1-9-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[1-n_samples_range1-9-9]",
"nilearn/_utils/tests/test_data_gen.py::test_write_fake_bold_img[None-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[1-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-step-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range1-9-5]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[4-4-regression-noise-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_bids_dataset_no_run_entity",
"nilearn/_utils/tests/test_data_gen.py::test_basic_paradigm[False]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-step-20-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_raw_with_session_and_runs[tasks1-n_runs1-1-1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_random_img[affine1-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-3-shapes0]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_fmri_data_and_design_generate[None-3-shapes1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_mni_space_img[2-1-5]",
"nilearn/_utils/tests/test_data_gen.py::test_fake_bids_derivatives_with_session_and_runs[tasks1-n_runs1-1-2]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_group_sparse_gaussian_graphs[0.1-n_samples_range0-9-9]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-noise-16-shape1]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-step-16-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[None-None-None-step-20-shape0]",
"nilearn/_utils/tests/test_data_gen.py::test_generate_fake_fmri[1-1-classification-noise-20-shape0]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_grid[full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_labels",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_labels[lab1]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_labels[lab0]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_set_title[foo]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder[complete]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes[None-axes3-False]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_event_path_tsv_csv[.tsv-\\t]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_path_str[.tsv-\\t]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_with_labels_and_different_tri[full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_labels[None]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_reorder",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes[fig1-None-True]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes_error[fig2-axes2]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_set_title[",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_set_title[None]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri_error[foo]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder_error[foo]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri_error[None]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_with_labels_and_different_tri[lower]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri[diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_errors[matrix0-lab0-False]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder[average]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder_error[None]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[RdBu_r-full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation_smoke_path",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[seismic_r-diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_errors[matrix1-None-True]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_grid[lower]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes_error[foo-bar]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_with_labels_and_different_tri[diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder[single]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes[None-None-True]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder_error[2]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri[full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[seismic_r-full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[bwr-diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_set_title[foo",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_grid[diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder[True]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes[fig0-None-True]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_show_design_matrix",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri_error[2]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_show_event_plot",
"nilearn/plotting/tests/test_matrix_plotting.py::test_show_contrast_matrix",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[bwr-full]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_correlation[RdBu_r-diag]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_show_contrast_matrix_axes",
"nilearn/plotting/tests/test_matrix_plotting.py::test_pad_contrast_matrix",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_design_matrix_path_str[.csv-,]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_tri[lower]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_matrix_plotting_errors[matrix2-lab2-",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_reorder[False]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_sanitize_figure_and_axes_error[1-2]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_plot_event_path_tsv_csv[.csv-,]",
"nilearn/plotting/tests/test_matrix_plotting.py::test_show_event_plot_duration_0"
] | [] | New BSD License | 20,419 | 754 | [
"doc/conf.py",
"nilearn/_utils/data_gen.py",
"nilearn/_utils/glm.py"
] |
|
owkin__PyDESeq2-349 | 9355b55e6c7c10c759db674d9a26d067be50b827 | 2024-12-09 09:19:04 | 5a44961789b96edb8012c3b62ba650d9e14ed05a | diff --git a/pydeseq2/utils.py b/pydeseq2/utils.py
index 4a686dd..db09e90 100644
--- a/pydeseq2/utils.py
+++ b/pydeseq2/utils.py
@@ -1083,7 +1083,7 @@ def nbinomGLM(
xbeta = design_matrix @ beta
d_neg_prior = (
beta * no_shrink_mask / prior_no_shrink_scale**2
- + 2 * beta * shrink_mask / (prior_scale**2 + beta[shrink_index] ** 2),
+ + 2 * beta * shrink_mask / (prior_scale**2 + beta[shrink_index] ** 2)
)
d_nll = (
@@ -1116,6 +1116,10 @@ def nbinomGLM(
jac=df,
hess=ddf if optimizer == "Newton-CG" else None,
method=optimizer,
+ options={
+ "ftol": 1e-8,
+ "gtol": 1e-8,
+ },
)
beta = res.x
| [BUG] lfc_shrink() generates wrong results
**Describe the bug**
lfc_shrink() generates wrong results. In most cases it acts as expected. For some genes it increases absolute value of fold change. For one differentially expressed gene with high expression (base_mean = 407277.128029) it even changes the sign (
log2FoldChange = -0.753026; shrankLog2FoldChange = 1.844100).
**To Reproduce**
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from pydeseq2.dds import DeseqDataSet
from pydeseq2.default_inference import DefaultInference
from pydeseq2.ds import DeseqStats
inference = DefaultInference(n_cpus=32)
dds = DeseqDataSet(
counts=counts_df,
metadata=metadata_df,
design_factors='condition',
inference=inference,
)
dds.deseq2()
stat_res = DeseqStats(
dds,
contrast=['condition', 'B', 'A'],
inference=inference
)
stat_res.summary()
stat_res_df = stat_res.results_df.copy()
stat_res.lfc_shrink('condition_B_vs_A')
stat_res_shrinked_df = stat_res.results_df.copy()
stat_res_shrinked_df = stat_res_shrinked_df.rename(columns={'log2FoldChange': 'log2FoldChange_shrank'})
merged_df = pd.merge(stat_res_df, stat_res_shrinked_df, left_index=True, right_index=True)
merged_df = merged_df[(merged_df['padj_x'].notna())]
sns.scatterplot(
merged_df,
x='log2FoldChange',
y='log2FoldChange_shrank'
)
# replace the sns.scatterplot() with code below to get rid of seaborn dependency
# plt.scatter(
# x=merged_df['log2FoldChange'],
# y=merged_df['log2FoldChange_shrank']
#)
plt.axline((0, 0), (1,1))
```
```
Fitting size factors...
... done in 0.02 seconds.
Fitting dispersions...
... done in 1.83 seconds.
Fitting dispersion trend curve...
... done in 0.55 seconds.
Fitting MAP dispersions...
... done in 1.79 seconds.
Fitting LFCs...
... done in 2.18 seconds.
Calculating cook's distance...
... done in 0.02 seconds.
Replacing 0 outlier genes.
Running Wald tests...
... done in 2.47 seconds.
Fitting MAP LFCs...
Log2 fold change & Wald test p-value: condition B vs A
baseMean log2FoldChange lfcSE stat pvalue \
GENEID
ENSG00000000003.15 6312.552040 -0.406453 0.092641 -4.387384 0.000011
ENSG00000000005.6 5.337949 -5.852983 2.030001 -2.883242 0.003936
ENSG00000000419.14 3926.067459 0.396839 0.089676 4.425236 0.000010
ENSG00000000457.14 986.626013 -0.033945 0.132989 -0.255247 0.798532
ENSG00000000460.17 3054.441682 -0.008417 0.079740 -0.105558 0.915933
... ... ... ... ... ...
ENSG00000289714.1 0.000000 NaN NaN NaN NaN
ENSG00000289715.1 0.000000 NaN NaN NaN NaN
ENSG00000289716.1 55.728811 1.269762 0.336892 3.769053 0.000164
ENSG00000289718.1 0.000000 NaN NaN NaN NaN
ENSG00000289719.1 11.956688 2.075506 0.837589 2.477953 0.013214
padj
GENEID
ENSG00000000003.15 0.000048
ENSG00000000005.6 0.010336
ENSG00000000419.14 0.000041
ENSG00000000457.14 0.863010
ENSG00000000460.17 0.944234
... ...
ENSG00000289714.1 NaN
ENSG00000289715.1 NaN
ENSG00000289716.1 0.000564
ENSG00000289718.1 NaN
ENSG00000289719.1 0.030276
[61125 rows x 6 columns]
/home/.../pydeseq2/utils.py:1260: RuntimeWarning: overflow encountered in exp
counts - (counts + size) / (1 + size * np.exp(-xbeta - offset))
Shrunk log2 fold change & Wald test p-value: condition B vs A
baseMean log2FoldChange lfcSE stat pvalue \
GENEID
ENSG00000000003.15 6312.552040 -0.405783 0.092409 -4.387384 0.000011
ENSG00000000005.6 5.337949 -6.123571 2.817030 -2.883242 0.003936
ENSG00000000419.14 3926.067459 0.387581 0.089467 4.425236 0.000010
ENSG00000000457.14 986.626013 -0.035274 0.131941 -0.255247 0.798532
ENSG00000000460.17 3054.441682 -0.007822 0.079505 -0.105558 0.915933
... ... ... ... ... ...
ENSG00000289714.1 0.000000 NaN NaN NaN NaN
ENSG00000289715.1 0.000000 NaN NaN NaN NaN
ENSG00000289716.1 55.728811 1.162090 0.337499 3.769053 0.000164
ENSG00000289718.1 0.000000 NaN NaN NaN NaN
ENSG00000289719.1 11.956688 1.440262 0.866340 2.477953 0.013214
padj
GENEID
ENSG00000000003.15 0.000048
ENSG00000000005.6 0.010336
ENSG00000000419.14 0.000041
ENSG00000000457.14 0.863010
ENSG00000000460.17 0.944234
... ...
ENSG00000289714.1 NaN
ENSG00000289715.1 NaN
ENSG00000289716.1 0.000564
ENSG00000289718.1 NaN
ENSG00000289719.1 0.030276
[61125 rows x 6 columns]
... done in 1.97 seconds.
```
**Expected behavior**
No increase in absolute value of fold change (if i understand correctly what apeGLM shrinking is doing).
No change in direction of fold change in very abundant differentially expressed genes.
No instability based on the name of conditions.
**Screenshots**

**Desktop (please complete the following information):**
- OS: Ubuntu 20.04.3 LTS
- pydeseq2: 0.4.12
**Additional context**
The behaviour is not stable based on the name of groups. If i exchange A to B the values are different and the overflow error doent pop up, however the results are still suspicious. The data i can not publicly share, but potentially i would be able to share them withthout geneids. The most weird and unstable behaviour was seen in genes with high expression. | owkin/PyDESeq2 | diff --git a/tests/data/large_counts/r_test_dispersions.csv b/tests/data/large_counts/r_test_dispersions.csv
new file mode 100644
index 0000000..98e8cbd
--- /dev/null
+++ b/tests/data/large_counts/r_test_dispersions.csv
@@ -0,0 +1,6 @@
+"","x"
+"g1",0.11861243039715
+"g2",0.0262784375410397
+"g3",0.0154560937196622
+"g4",0.0187562440702638
+"g5",0.029726368740505
diff --git a/tests/data/large_counts/r_test_lfc_shrink_res.csv b/tests/data/large_counts/r_test_lfc_shrink_res.csv
new file mode 100644
index 0000000..3fa16c8
--- /dev/null
+++ b/tests/data/large_counts/r_test_lfc_shrink_res.csv
@@ -0,0 +1,6 @@
+"","baseMean","log2FoldChange","lfcSE","pvalue","padj"
+"g1",22.1449196667942,0.0980467648341208,0.313138103429392,0.67467748621509,0.67467748621509
+"g2",381.001165601189,-0.25238081586499,0.166175542221483,0.102486854258627,0.170811423764379
+"g3",2655.2687326308,1.32808989591327,0.129192036932687,1.04455372148152e-25,5.22276860740759e-25
+"g4",11603.4762054462,0.0630455944543123,0.134144518362108,0.621192047119601,0.67467748621509
+"g5",392923.057672917,-0.409599894742138,0.172482439150294,0.0120579588905087,0.0301448972262718
diff --git a/tests/data/large_counts/r_test_res.csv b/tests/data/large_counts/r_test_res.csv
new file mode 100644
index 0000000..f661f78
--- /dev/null
+++ b/tests/data/large_counts/r_test_res.csv
@@ -0,0 +1,6 @@
+"","baseMean","log2FoldChange","lfcSE","stat","pvalue","padj"
+"g1",22.1449196667942,0.173017658054474,0.412204780862638,0.419737145436286,0.67467748621509,0.67467748621509
+"g2",381.001165601189,-0.283429708243346,0.173572838265613,-1.63291509821152,0.102486854258627,0.170811423764379
+"g3",2655.2687326308,1.34796297540684,0.128597274635474,10.4820493220235,1.04455372148152e-25,5.22276860740759e-25
+"g4",11603.4762054462,0.0691977287249358,0.140030562520189,0.494161613576031,0.621192047119601,0.67467748621509
+"g5",392923.057672917,-0.441570776728009,0.175893527435991,-2.51044358007261,0.0120579588905087,0.0301448972262718
diff --git a/tests/data/large_counts/r_test_size_factors.csv b/tests/data/large_counts/r_test_size_factors.csv
new file mode 100644
index 0000000..6483001
--- /dev/null
+++ b/tests/data/large_counts/r_test_size_factors.csv
@@ -0,0 +1,9 @@
+"","x"
+"A1",1.06181876155545
+"A2",1.24743230704576
+"A3",1.32173521903446
+"A4",1.09150326866504
+"B1",1.0472175142877
+"B2",0.750260803174501
+"B3",1.16725656977807
+"B4",0.703136348465353
diff --git a/tests/test_pydeseq2.py b/tests/test_pydeseq2.py
index 2279fc8..54c6119 100644
--- a/tests/test_pydeseq2.py
+++ b/tests/test_pydeseq2.py
@@ -372,6 +372,72 @@ def test_iterative_size_factors(counts_df, metadata, tol=0.02):
).max() < tol
+def test_lfc_shrinkage_large_counts(counts_df, metadata, tol=0.02):
+ """Test that the outputs of the lfc_shrink function match those of the original
+ R package (starting from the same inputs), up to a tolerance in relative error in
+ the presence of a gene with very large counts.
+ """
+
+ test_path = str(Path(os.path.realpath(tests.__file__)).parent.resolve())
+ r_res = pd.read_csv(
+ os.path.join(test_path, "data/large_counts/r_test_res.csv"), index_col=0
+ )
+ r_shrunk_res = pd.read_csv(
+ os.path.join(test_path, "data/large_counts/r_test_lfc_shrink_res.csv"),
+ index_col=0,
+ )
+
+ r_size_factors = pd.read_csv(
+ os.path.join(test_path, "data/large_counts/r_test_size_factors.csv"),
+ index_col=0,
+ ).squeeze()
+
+ r_dispersions = pd.read_csv(
+ os.path.join(test_path, "data/large_counts/r_test_dispersions.csv"),
+ index_col=0,
+ ).squeeze()
+
+ counts_df = pd.DataFrame(
+ data=[
+ [25, 405, 1355, 12558, 489843],
+ [28, 480, 2144, 13844, 514571],
+ [12, 690, 1919, 15632, 564106],
+ [31, 420, 1684, 11513, 556380],
+ [34, 278, 3849, 11577, 412551],
+ [19, 249, 3086, 7296, 295565],
+ [17, 491, 4089, 13805, 280945],
+ [15, 251, 2785, 10492, 214062],
+ ],
+ index=["A1", "A2", "A3", "A4", "B1", "B2", "B3", "B4"],
+ columns=["g1", "g2", "g3", "g4", "g5"],
+ )
+
+ metadata_df = pd.DataFrame(
+ data=["A", "A", "A", "A", "B", "B", "B", "B"],
+ index=["A1", "A2", "A3", "A4", "B1", "B2", "B3", "B4"],
+ columns=["condition"],
+ )
+
+ dds = DeseqDataSet(counts=counts_df, metadata=metadata_df, design="~condition")
+ dds.deseq2()
+
+ dds.obsm["size_factors"] = r_size_factors.values
+ dds.varm["dispersions"] = r_dispersions.values
+ dds.varm["LFC"].iloc[:, 1] = r_res.log2FoldChange.values * np.log(2)
+
+ res = DeseqStats(dds, contrast=["condition", "B", "A"])
+ res.summary()
+ res.SE = r_res.lfcSE * np.log(2)
+ res.lfc_shrink(coeff="condition[T.B]")
+ shrunk_res = res.results_df
+
+ # Check that the same LFC are found (up to tol)
+ assert (
+ abs(r_shrunk_res.log2FoldChange - shrunk_res.log2FoldChange)
+ / abs(r_shrunk_res.log2FoldChange)
+ ).max() < tol
+
+
# Multi-factor tests
@pytest.mark.parametrize("with_outliers", [True, False])
def test_multifactor_deseq(counts_df, metadata, with_outliers, tol=0.04):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.10",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
anndata==0.11.4
anyio==4.9.0
argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
array_api_compat==1.11.2
arrow==1.3.0
asttokens==3.0.0
async-lru==2.0.5
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
bleach==6.2.0
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
comm==0.2.2
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
debugpy==1.8.13
decorator==5.2.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
exceptiongroup==1.2.2
executing==2.2.0
fastjsonschema==2.21.1
filelock==3.18.0
fonttools==4.56.0
formulaic==1.1.1
formulaic-contrasts==1.0.0
fqdn==1.5.1
gitdb==4.0.12
GitPython==3.1.44
h11==0.14.0
h5py==3.13.0
httpcore==1.0.7
httpx==0.28.1
identify==2.6.9
idna==3.10
imagesize==1.4.1
iniconfig==2.1.0
interface-meta==1.3.0
ipykernel==6.29.5
ipython==8.34.0
ipywidgets==8.1.5
isoduration==20.11.0
jedi==0.19.2
Jinja2==3.1.6
joblib==1.4.2
json5==0.10.0
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
jupyter==1.1.1
jupyter-console==6.6.3
jupyter-events==0.12.0
jupyter-lsp==2.2.5
jupyter_client==8.6.3
jupyter_core==5.7.2
jupyter_server==2.15.0
jupyter_server_terminals==0.5.3
jupyterlab==4.3.6
jupyterlab_pygments==0.3.0
jupyterlab_server==2.27.3
jupyterlab_widgets==3.0.13
kiwisolver==1.4.8
latexcodec==3.0.0
livereload==2.7.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.10.1
matplotlib-inline==0.1.7
mdit-py-plugins==0.4.2
mdurl==0.1.2
mistune==3.1.3
mypy==1.15.0
mypy-extensions==1.0.0
myst-parser==2.0.0
natsort==8.4.0
nbclient==0.10.2
nbconvert==7.16.6
nbformat==5.10.4
nest-asyncio==1.6.0
nodeenv==1.9.1
notebook==7.3.3
notebook_shim==0.2.4
numpy==2.2.4
numpydoc==1.8.0
overrides==7.7.0
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.3.250308
pandocfilters==1.5.1
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
prometheus_client==0.21.1
prompt_toolkit==3.0.50
psutil==7.0.0
ptyprocess==0.7.0
pure_eval==0.2.3
pybtex==0.24.0
pybtex-docutils==1.0.3
pycparser==2.22
-e git+https://github.com/owkin/PyDESeq2.git@9355b55e6c7c10c759db674d9a26d067be50b827#egg=pydeseq2
Pygments==2.19.1
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
pytz==2025.2
PyYAML==6.0.2
pyzmq==26.3.0
referencing==0.36.2
requests==2.32.3
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rpds-py==0.24.0
scikit-learn==1.6.1
scipy==1.15.2
Send2Trash==1.8.3
session_info==1.0.0
six==1.17.0
smmap==5.0.2
sniffio==1.3.1
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==6.0.1
sphinx-autobuild==2020.9.1
sphinx-autodoc-typehints==1.23.0
sphinx-click==3.1.0
sphinx-gallery==0.11.1
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-bibtex==2.5.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
stdlib-list==0.11.1
swebench_matterhorn @ file:///swebench_matterhorn
tabulate==0.9.0
terminado==0.18.1
texttable==1.6.3
threadpoolctl==3.6.0
tinycss2==1.4.0
tomli==2.2.1
tornado==6.4.2
traitlets==5.14.3
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
uri-template==1.3.0
urllib3==2.3.0
virtualenv==20.29.3
wcwidth==0.2.13
webcolors==24.11.1
webencodings==0.5.1
websocket-client==1.8.0
widgetsnbextension==4.0.13
wrapt==1.17.2
| name: PyDESeq2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- anndata==0.11.4
- anyio==4.9.0
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- array-api-compat==1.11.2
- arrow==1.3.0
- asttokens==3.0.0
- async-lru==2.0.5
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- bleach==6.2.0
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- comm==0.2.2
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- debugpy==1.8.13
- decorator==5.2.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- exceptiongroup==1.2.2
- executing==2.2.0
- fastjsonschema==2.21.1
- filelock==3.18.0
- fonttools==4.56.0
- formulaic==1.1.1
- formulaic-contrasts==1.0.0
- fqdn==1.5.1
- gitdb==4.0.12
- gitpython==3.1.44
- h11==0.14.0
- h5py==3.13.0
- httpcore==1.0.7
- httpx==0.28.1
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- iniconfig==2.1.0
- interface-meta==1.3.0
- ipykernel==6.29.5
- ipython==8.34.0
- ipywidgets==8.1.5
- isoduration==20.11.0
- jedi==0.19.2
- jinja2==3.1.6
- joblib==1.4.2
- json5==0.10.0
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- jupyter==1.1.1
- jupyter-client==8.6.3
- jupyter-console==6.6.3
- jupyter-core==5.7.2
- jupyter-events==0.12.0
- jupyter-lsp==2.2.5
- jupyter-server==2.15.0
- jupyter-server-terminals==0.5.3
- jupyterlab==4.3.6
- jupyterlab-pygments==0.3.0
- jupyterlab-server==2.27.3
- jupyterlab-widgets==3.0.13
- kiwisolver==1.4.8
- latexcodec==3.0.0
- livereload==2.7.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.10.1
- matplotlib-inline==0.1.7
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- mistune==3.1.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- myst-parser==2.0.0
- natsort==8.4.0
- nbclient==0.10.2
- nbconvert==7.16.6
- nbformat==5.10.4
- nest-asyncio==1.6.0
- nodeenv==1.9.1
- notebook==7.3.3
- notebook-shim==0.2.4
- numpy==2.2.4
- numpydoc==1.8.0
- overrides==7.7.0
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.3.250308
- pandocfilters==1.5.1
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- prometheus-client==0.21.1
- prompt-toolkit==3.0.50
- psutil==7.0.0
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pybtex==0.24.0
- pybtex-docutils==1.0.3
- pycparser==2.22
- pydeseq2==0.5.0rc2
- pygments==2.19.1
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pytz==2025.2
- pyyaml==6.0.2
- pyzmq==26.3.0
- referencing==0.36.2
- requests==2.32.3
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rpds-py==0.24.0
- scikit-learn==1.6.1
- scipy==1.15.2
- send2trash==1.8.3
- session-info==1.0.0
- six==1.17.0
- smmap==5.0.2
- sniffio==1.3.1
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==6.0.1
- sphinx-autobuild==2020.9.1
- sphinx-autodoc-typehints==1.23.0
- sphinx-click==3.1.0
- sphinx-gallery==0.11.1
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-bibtex==2.5.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- stdlib-list==0.11.1
- swebench-matterhorn==0.0.0
- tabulate==0.9.0
- terminado==0.18.1
- texttable==1.6.3
- threadpoolctl==3.6.0
- tinycss2==1.4.0
- tomli==2.2.1
- tornado==6.4.2
- traitlets==5.14.3
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- uri-template==1.3.0
- urllib3==2.3.0
- virtualenv==20.29.3
- wcwidth==0.2.13
- webcolors==24.11.1
- webencodings==0.5.1
- websocket-client==1.8.0
- widgetsnbextension==4.0.13
- wrapt==1.17.2
prefix: /opt/conda/envs/PyDESeq2
| [
"tests/test_pydeseq2.py::test_lfc_shrinkage_large_counts"
] | [] | [
"tests/test_pydeseq2.py::test_size_factors_ratio",
"tests/test_pydeseq2.py::test_size_factors_poscounts",
"tests/test_pydeseq2.py::test_size_factors_control_genes",
"tests/test_pydeseq2.py::test_deseq_independent_filtering_parametric_fit",
"tests/test_pydeseq2.py::test_deseq_independent_filtering_mean_fit",
"tests/test_pydeseq2.py::test_deseq_without_independent_filtering_parametric_fit",
"tests/test_pydeseq2.py::test_alt_hypothesis[lessAbs]",
"tests/test_pydeseq2.py::test_alt_hypothesis[greaterAbs]",
"tests/test_pydeseq2.py::test_alt_hypothesis[less]",
"tests/test_pydeseq2.py::test_alt_hypothesis[greater]",
"tests/test_pydeseq2.py::test_deseq_no_refit_cooks",
"tests/test_pydeseq2.py::test_lfc_shrinkage",
"tests/test_pydeseq2.py::test_lfc_shrinkage_no_apeAdapt",
"tests/test_pydeseq2.py::test_iterative_size_factors",
"tests/test_pydeseq2.py::test_multifactor_deseq[True]",
"tests/test_pydeseq2.py::test_multifactor_deseq[False]",
"tests/test_pydeseq2.py::test_multifactor_lfc_shrinkage",
"tests/test_pydeseq2.py::test_continuous_deseq[True]",
"tests/test_pydeseq2.py::test_continuous_deseq[False]",
"tests/test_pydeseq2.py::test_continuous_lfc_shrinkage",
"tests/test_pydeseq2.py::test_wide_deseq[True]",
"tests/test_pydeseq2.py::test_wide_deseq[False]",
"tests/test_pydeseq2.py::test_contrast",
"tests/test_pydeseq2.py::test_anndata_init",
"tests/test_pydeseq2.py::test_design_matrix_init",
"tests/test_pydeseq2.py::test_vst",
"tests/test_pydeseq2.py::test_mean_vst",
"tests/test_pydeseq2.py::test_deseq2_norm",
"tests/test_pydeseq2.py::test_deseq2_norm_fit",
"tests/test_pydeseq2.py::test_deseq2_norm_transform",
"tests/test_pydeseq2.py::test_vst_fit",
"tests/test_pydeseq2.py::test_vst_transform",
"tests/test_pydeseq2.py::test_vst_blind[mean-parametric]",
"tests/test_pydeseq2.py::test_vst_blind[parametric-mean]",
"tests/test_pydeseq2.py::test_vst_blind[parametric-parametric]",
"tests/test_pydeseq2.py::test_vst_blind[mean-mean]",
"tests/test_pydeseq2.py::test_vst_transform_no_fit"
] | [] | MIT License | 20,434 | 269 | [
"pydeseq2/utils.py"
] |
|
narwhals-dev__narwhals-1544 | d788eece6f1bee9e21d3840e1ec9e25799504331 | 2024-12-09 09:48:54 | a8fb9a2159a0072280e53b9f5726f8d36b7793ba | Machele-codez: @MarcoGorelli can you help me out with the failing check? It doesn't seem to be an effect of some code I wrote :thinking: (I may be wrong) | diff --git a/narwhals/dependencies.py b/narwhals/dependencies.py
index 463a64a6..61937f08 100644
--- a/narwhals/dependencies.py
+++ b/narwhals/dependencies.py
@@ -24,6 +24,9 @@ if TYPE_CHECKING:
import pyarrow as pa
import pyspark.sql as pyspark_sql
+ from narwhals.dataframe import DataFrame
+ from narwhals.dataframe import LazyFrame
+ from narwhals.series import Series
from narwhals.typing import IntoSeries
# We silently allow these but - given that they claim
@@ -320,6 +323,42 @@ def is_into_dataframe(native_dataframe: Any) -> bool:
)
+def is_narwhals_dataframe(df: Any) -> TypeGuard[DataFrame[Any]]:
+ """Check whether `df` is a Narwhals DataFrame.
+
+ This is useful if you expect a user to pass in a Narwhals
+ DataFrame directly, and you want to catch both ``narwhals.DataFrame``
+ and ``narwhals.stable.v1.DataFrame`.
+ """
+ from narwhals.dataframe import DataFrame
+
+ return isinstance(df, DataFrame)
+
+
+def is_narwhals_lazyframe(lf: Any) -> TypeGuard[LazyFrame[Any]]:
+ """Check whether `lf` is a Narwhals LazyFrame.
+
+ This is useful if you expect a user to pass in a Narwhals
+ LazyFrame directly, and you want to catch both ``narwhals.LazyFrame``
+ and ``narwhals.stable.v1.LazyFrame`.
+ """
+ from narwhals.dataframe import LazyFrame
+
+ return isinstance(lf, LazyFrame)
+
+
+def is_narwhals_series(ser: Any) -> TypeGuard[Series[Any]]:
+ """Check whether `ser` is a Narwhals Series.
+
+ This is useful if you expect a user to pass in a Narwhals
+ Series directly, and you want to catch both ``narwhals.Series``
+ and ``narwhals.stable.v1.Series`.
+ """
+ from narwhals.series import Series
+
+ return isinstance(ser, Series)
+
+
__all__ = [
"get_cudf",
"get_ibis",
@@ -336,6 +375,9 @@ __all__ = [
"is_into_series",
"is_modin_dataframe",
"is_modin_series",
+ "is_narwhals_dataframe",
+ "is_narwhals_lazyframe",
+ "is_narwhals_series",
"is_numpy_array",
"is_pandas_dataframe",
"is_pandas_index",
diff --git a/narwhals/dtypes.py b/narwhals/dtypes.py
index b0434125..b086d216 100644
--- a/narwhals/dtypes.py
+++ b/narwhals/dtypes.py
@@ -386,6 +386,33 @@ class Datetime(TemporalType):
Notes:
Adapted from [Polars implementation](https://github.com/pola-rs/polars/blob/py-1.7.1/py-polars/polars/datatypes/classes.py#L398-L457)
+
+ Examples:
+ >>> import pandas as pd
+ >>> import polars as pl
+ >>> import pyarrow as pa
+ >>> import pyarrow.compute as pc
+ >>> import narwhals as nw
+ >>> from datetime import datetime, timedelta
+ >>> data = [datetime(2024, 12, 9) + timedelta(days=n) for n in range(5)]
+ >>> ser_pd = (
+ ... pd.Series(data)
+ ... .dt.tz_localize("Africa/Accra")
+ ... .astype("datetime64[ms, Africa/Accra]")
+ ... )
+ >>> ser_pl = (
+ ... pl.Series(data).cast(pl.Datetime("ms")).dt.replace_time_zone("Africa/Accra")
+ ... )
+ >>> ser_pa = pc.assume_timezone(
+ ... pa.chunked_array([data], type=pa.timestamp("ms")), "Africa/Accra"
+ ... )
+
+ >>> nw.from_native(ser_pd, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
+ >>> nw.from_native(ser_pl, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
+ >>> nw.from_native(ser_pa, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
"""
def __init__(
diff --git a/narwhals/stable/v1/_dtypes.py b/narwhals/stable/v1/_dtypes.py
index 22cf05db..41dc854f 100644
--- a/narwhals/stable/v1/_dtypes.py
+++ b/narwhals/stable/v1/_dtypes.py
@@ -37,6 +37,33 @@ class Datetime(NwDatetime):
Notes:
Adapted from [Polars implementation](https://github.com/pola-rs/polars/blob/py-1.7.1/py-polars/polars/datatypes/classes.py#L398-L457)
+
+ Examples:
+ >>> import pandas as pd
+ >>> import polars as pl
+ >>> import pyarrow as pa
+ >>> import pyarrow.compute as pc
+ >>> import narwhals as nw
+ >>> from datetime import datetime, timedelta
+ >>> data = [datetime(2024, 12, 9) + timedelta(days=n) for n in range(5)]
+ >>> ser_pd = (
+ ... pd.Series(data)
+ ... .dt.tz_localize("Africa/Accra")
+ ... .astype("datetime64[ms, Africa/Accra]")
+ ... )
+ >>> ser_pl = (
+ ... pl.Series(data).cast(pl.Datetime("ms")).dt.replace_time_zone("Africa/Accra")
+ ... )
+ >>> ser_pa = pc.assume_timezone(
+ ... pa.chunked_array([data], type=pa.timestamp("ms")), "Africa/Accra"
+ ... )
+
+ >>> nw.from_native(ser_pd, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
+ >>> nw.from_native(ser_pl, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
+ >>> nw.from_native(ser_pa, series_only=True).dtype
+ Datetime(time_unit='ms', time_zone='Africa/Accra')
"""
def __hash__(self) -> int:
diff --git a/narwhals/stable/v1/dependencies.py b/narwhals/stable/v1/dependencies.py
index 6a020622..8ad1ad20 100644
--- a/narwhals/stable/v1/dependencies.py
+++ b/narwhals/stable/v1/dependencies.py
@@ -15,6 +15,9 @@ from narwhals.dependencies import is_into_dataframe
from narwhals.dependencies import is_into_series
from narwhals.dependencies import is_modin_dataframe
from narwhals.dependencies import is_modin_series
+from narwhals.dependencies import is_narwhals_dataframe
+from narwhals.dependencies import is_narwhals_lazyframe
+from narwhals.dependencies import is_narwhals_series
from narwhals.dependencies import is_numpy_array
from narwhals.dependencies import is_pandas_dataframe
from narwhals.dependencies import is_pandas_index
@@ -43,6 +46,9 @@ __all__ = [
"is_into_series",
"is_modin_dataframe",
"is_modin_series",
+ "is_narwhals_dataframe",
+ "is_narwhals_lazyframe",
+ "is_narwhals_series",
"is_numpy_array",
"is_pandas_dataframe",
"is_pandas_index",
| [Doc]: Add a docstring example in `narwhals.dtypes` for Datetime - beginner friendly
### What type of report is this?
Improvement
### Please describe the issue.
Thank you for contributing to Narwhals
May the Undwerwater Unicorn Magic be with You! 🌊🦄
Add a docstring example for Datetime - dtype, that will be visible here:
https://narwhals-dev.github.io/narwhals/api-reference/dtypes/#narwhals.dtypes.Datetime
Check https://github.com/narwhals-dev/narwhals/blob/main/CONTRIBUTING.md
for instructions how to set up your environment
Check https://narwhals-dev.github.io/narwhals/api-reference/dtypes/#narwhals.dtypes.List
as a reference of how it should look like
and https://github.com/narwhals-dev/narwhals/blob/main/narwhals/dtypes.py
as a reference on how to create docstrings
If you need any help just let us know!
Happy Contributing💻🎉
### If you have a suggestion on how it should be, add it below.
_No response_ | narwhals-dev/narwhals | diff --git a/tests/dependencies/is_narwhals_dataframe_test.py b/tests/dependencies/is_narwhals_dataframe_test.py
new file mode 100644
index 00000000..0a25bc8b
--- /dev/null
+++ b/tests/dependencies/is_narwhals_dataframe_test.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_dataframe
+
+if TYPE_CHECKING:
+ from tests.utils import ConstructorEager
+
+
+def test_is_narwhals_dataframe(constructor_eager: ConstructorEager) -> None:
+ df = constructor_eager({"col1": [1, 2], "col2": [3, 4]})
+
+ assert is_narwhals_dataframe(nw.from_native(df))
+ assert is_narwhals_dataframe(nws.from_native(df))
+ assert not is_narwhals_dataframe(df)
diff --git a/tests/dependencies/is_narwhals_lazyframe_test.py b/tests/dependencies/is_narwhals_lazyframe_test.py
new file mode 100644
index 00000000..1dcbe5fc
--- /dev/null
+++ b/tests/dependencies/is_narwhals_lazyframe_test.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_lazyframe
+from tests.utils import Constructor
+
+if TYPE_CHECKING:
+ from tests.utils import Constructor
+
+
+def test_is_narwhals_lazyframe(constructor: Constructor) -> None:
+ lf = constructor({"a": [1, 2, 3]})
+
+ assert is_narwhals_lazyframe(nw.from_native(lf).lazy())
+ assert is_narwhals_lazyframe(nws.from_native(lf).lazy())
+ assert not is_narwhals_lazyframe(lf)
diff --git a/tests/dependencies/is_narwhals_series_test.py b/tests/dependencies/is_narwhals_series_test.py
new file mode 100644
index 00000000..0beb3fc1
--- /dev/null
+++ b/tests/dependencies/is_narwhals_series_test.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_series
+
+if TYPE_CHECKING:
+ from tests.utils import ConstructorEager
+
+
+def test_is_narwhals_series(constructor_eager: ConstructorEager) -> None:
+ df = constructor_eager({"col1": [1, 2], "col2": [3, 4]})
+
+ assert is_narwhals_series(nw.from_native(df, eager_only=True)["col1"])
+ assert is_narwhals_series(nws.from_native(df, eager_only=True)["col1"])
+ assert not is_narwhals_series(nw.from_native(df, eager_only=True)["col1"].to_native())
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 1.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@d788eece6f1bee9e21d3840e1ec9e25799504331#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
py4j==0.10.9.7
pyarrow==19.0.1
pyarrow-stubs==17.19
pyspark==3.5.5
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.16.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- py4j==0.10.9.7
- pyarrow==19.0.1
- pyarrow-stubs==17.19
- pyspark==3.5.5
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[polars_eager_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pyarrow_table_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pyarrow_table_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[polars_eager_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[dask_lazy_p2_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[polars_eager_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[polars_lazy_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pyarrow_table_constructor]"
] | [] | [] | [] | MIT License | 20,435 | 1,885 | [
"narwhals/dependencies.py",
"narwhals/dtypes.py",
"narwhals/stable/v1/_dtypes.py",
"narwhals/stable/v1/dependencies.py"
] |
common-workflow-language__cwltool-2084 | 6c86caa0571fd186d90a6600e0bb405596d4a5e0 | 2024-12-09 12:27:50 | 6c86caa0571fd186d90a6600e0bb405596d4a5e0 | codecov[bot]: ## [Codecov](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language) Report
Attention: Patch coverage is `55.55556%` with `8 lines` in your changes missing coverage. Please review.
> Project coverage is 70.58%. Comparing base [(`6c86caa`)](https://app.codecov.io/gh/common-workflow-language/cwltool/commit/6c86caa0571fd186d90a6600e0bb405596d4a5e0?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language) to head [(`782a369`)](https://app.codecov.io/gh/common-workflow-language/cwltool/commit/782a369a6173610962643edc9bd663cdf9b2d314?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language).
| [Files with missing lines](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language) | Patch % | Lines |
|---|---|---|
| [cwltool/loghandler.py](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?src=pr&el=tree&filepath=cwltool%2Floghandler.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language#diff-Y3dsdG9vbC9sb2doYW5kbGVyLnB5) | 54.54% | [4 Missing and 1 partial :warning: ](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language) |
| [cwltool/main.py](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?src=pr&el=tree&filepath=cwltool%2Fmain.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language#diff-Y3dsdG9vbC9tYWluLnB5) | 57.14% | [2 Missing and 1 partial :warning: ](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language) |
> :exclamation: There is a different number of reports uploaded between BASE (6c86caa) and HEAD (782a369). Click for more details.
>
> <details><summary>HEAD has 16 uploads less than BASE</summary>
>
>| Flag | BASE (6c86caa) | HEAD (782a369) |
>|------|------|------|
>||17|1|
></details>
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #2084 +/- ##
===========================================
- Coverage 84.13% 70.58% -13.55%
===========================================
Files 46 46
Lines 8312 8316 +4
Branches 1959 1959
===========================================
- Hits 6993 5870 -1123
- Misses 840 1837 +997
- Partials 479 609 +130
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/common-workflow-language/cwltool/pull/2084?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=common-workflow-language).
| diff --git a/cwltool/loghandler.py b/cwltool/loghandler.py
index 76daa8be..c7683081 100644
--- a/cwltool/loghandler.py
+++ b/cwltool/loghandler.py
@@ -11,7 +11,7 @@ _logger.setLevel(logging.INFO)
def configure_logging(
- stderr_handler: logging.Handler,
+ err_handler: logging.Handler,
no_warnings: bool,
quiet: bool,
debug: bool,
@@ -21,25 +21,29 @@ def configure_logging(
) -> None:
"""Configure logging."""
rdflib_logger = logging.getLogger("rdflib.term")
- rdflib_logger.addHandler(stderr_handler)
+ rdflib_logger.addHandler(err_handler)
rdflib_logger.setLevel(logging.ERROR)
deps_logger = logging.getLogger("galaxy.tool_util.deps")
- deps_logger.addHandler(stderr_handler)
+ deps_logger.addHandler(err_handler)
ss_logger = logging.getLogger("salad")
- ss_logger.addHandler(stderr_handler)
if no_warnings:
- stderr_handler.setLevel(logging.ERROR)
- if quiet:
+ err_handler.setLevel(logging.ERROR)
+ ss_logger.setLevel(logging.ERROR)
+ elif quiet:
# Silence STDERR, not an eventual provenance log file
- stderr_handler.setLevel(logging.WARN)
+ err_handler.setLevel(logging.WARN)
+ ss_logger.setLevel(logging.WARN)
+ else:
+ err_handler.setLevel(logging.INFO)
+ ss_logger.setLevel(logging.INFO)
if debug:
# Increase to debug for both stderr and provenance log file
base_logger.setLevel(logging.DEBUG)
- stderr_handler.setLevel(logging.DEBUG)
+ err_handler.setLevel(logging.DEBUG)
rdflib_logger.setLevel(logging.DEBUG)
deps_logger.setLevel(logging.DEBUG)
fmtclass = coloredlogs.ColoredFormatter if enable_color else logging.Formatter
formatter = fmtclass("%(levelname)s %(message)s")
if timestamps:
formatter = fmtclass("[%(asctime)s] %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
- stderr_handler.setFormatter(formatter)
+ err_handler.setFormatter(formatter)
diff --git a/cwltool/main.py b/cwltool/main.py
index 7aedce6b..17ccb11c 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -967,12 +967,6 @@ def main(
stdout = cast(IO[str], stdout)
_logger.removeHandler(defaultStreamHandler)
- stderr_handler = logger_handler
- if stderr_handler is not None:
- _logger.addHandler(stderr_handler)
- else:
- coloredlogs.install(logger=_logger, stream=stderr)
- stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
global docker_exe
@@ -997,6 +991,13 @@ def main(
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
+ if logger_handler is not None:
+ err_handler = logger_handler
+ _logger.addHandler(err_handler)
+ else:
+ coloredlogs.install(logger=_logger, stream=stdout if args.validate else stderr)
+ err_handler = _logger.handlers[-1]
+ logging.getLogger("salad").handlers = _logger.handlers
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
@@ -1015,7 +1016,7 @@ def main(
setattr(args, key, val)
configure_logging(
- stderr_handler,
+ err_handler,
args.no_warnings,
args.quiet,
runtimeContext.debug,
@@ -1413,8 +1414,7 @@ def main(
# public API for logging.StreamHandler
prov_log_handler.close()
close_ro(research_obj, args.provenance)
-
- _logger.removeHandler(stderr_handler)
+ _logger.removeHandler(err_handler)
_logger.addHandler(defaultStreamHandler)
| Warnings printed to stderr, --no-warning partially ignored
## Expected Behavior
Warnings should be printed to stdout not stderr. --no-warning should silence warnings
## Actual Behavior
warnings are printed to stderr. passing --no-warning does not silence warnings.
## Example output:
### with `--no-warning`
```
py_venv) samuel@Wald2:~/git/OpenMS$ cwltool --no-warning --validate --non-strict share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:21:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#db_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:24:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#digest_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:33:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#exp_ms2_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:18:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#id_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:27:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#lfq_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:15:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:30:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#theo_ms2_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl is valid CWL.
```
### without `--no-warning`:
```
(py_venv) samuel@Wald2:~/git/OpenMS$ cwltool --validate --non-strict share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl 1>/dev/null
INFO /home/samuel/Downloads/py_venv/bin/cwltool 3.1.20241112140730
INFO Resolved 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl' to 'file:///home/samuel/git/OpenMS/share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl'
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:21:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#db_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:21:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#db_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:24:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#digest_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:24:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#digest_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:33:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#exp_ms2_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:33:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#exp_ms2_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:18:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#id_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:18:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#id_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:27:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#lfq_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:27:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#lfq_out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:15:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:15:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#out' previously defined
share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:30:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#theo_ms2_out' previously defined
WARNING share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl:30:3: object id 'share/OpenMS/commonwl/NucleicAcidSearchEngine.cwl#theo_ms2_out' previously defined
```
| common-workflow-language/cwltool | diff --git a/tests/test_examples.py b/tests/test_examples.py
index c6ec6d06..f413976f 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -1820,9 +1820,9 @@ def test_validate_optional_src_with_mandatory_sink() -> None:
["--validate", get_data("tests/wf/optional_src_mandatory_sink.cwl")]
)
assert exit_code == 0
- stderr = re.sub(r"\s\s+", " ", stderr)
- assert 'Source \'opt_file\' of type ["null", "File"] may be incompatible' in stderr
- assert "with sink 'r' of type \"File\"" in stderr
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ assert 'Source \'opt_file\' of type ["null", "File"] may be incompatible' in stdout
+ assert "with sink 'r' of type \"File\"" in stdout
def test_res_req_expr_float_1_0() -> None:
@@ -1875,12 +1875,11 @@ def test_invalid_nested_array() -> None:
]
)
assert exit_code == 1, stderr
- stderr = re.sub(r"\n\s+", " ", stderr)
- stderr = re.sub(r"\s\s+", " ", stderr)
- assert "Tool definition failed validation:" in stderr
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ assert "Tool definition failed validation:" in stdout
assert (
"tests/nested-array.cwl:6:5: Field 'type' references unknown identifier 'string[][]'"
- ) in stderr
+ ) in stdout
def test_input_named_id() -> None:
diff --git a/tests/test_validate.py b/tests/test_validate.py
index 171a6b6c..f2d89e47 100644
--- a/tests/test_validate.py
+++ b/tests/test_validate.py
@@ -1,5 +1,7 @@
"""Tests --validation."""
+import io
+import logging
import re
from .util import get_data, get_main_output
@@ -43,13 +45,83 @@ def test_validate_with_invalid_input_object() -> None:
]
)
assert exit_code == 1
- stderr = re.sub(r"\s\s+", " ", stderr)
- assert "Invalid job input record" in stderr
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ assert "Invalid job input record" in stdout
assert (
"tests/wf/1st-workflow_bad_inputs.yml:2:1: * the 'ex' field is not "
- "valid because the value is not string" in stderr
+ "valid because the value is not string" in stdout
)
assert (
"tests/wf/1st-workflow_bad_inputs.yml:1:1: * the 'inp' field is not "
- "valid because is not a dict. Expected a File object." in stderr
+ "valid because is not a dict. Expected a File object." in stdout
+ )
+
+
+def test_validate_quiet() -> None:
+ """Ensure that --validate --quiet prints the correct amount of information."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ "--quiet",
+ get_data("tests/CometAdapter.cwl"),
+ ]
+ )
+ assert exit_code == 0
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ assert "INFO" not in stdout
+ assert "INFO" not in stderr
+ assert "tests/CometAdapter.cwl:9:3: object id" in stdout
+ assert "tests/CometAdapter.cwl#out' previously defined" in stdout
+
+
+def test_validate_no_warnings() -> None:
+ """Ensure that --validate --no-warnings doesn't print any warnings."""
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ "--no-warnings",
+ get_data("tests/CometAdapter.cwl"),
+ ]
)
+ assert exit_code == 0
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "INFO" not in stdout
+ assert "INFO" not in stderr
+ assert "WARNING" not in stdout
+ assert "WARNING" not in stderr
+ assert "tests/CometAdapter.cwl:9:3: object id" not in stdout
+ assert "tests/CometAdapter.cwl:9:3: object id" not in stderr
+ assert "tests/CometAdapter.cwl#out' previously defined" not in stdout
+ assert "tests/CometAdapter.cwl#out' previously defined" not in stderr
+
+
+def test_validate_custom_logger() -> None:
+ """Custom log handling test."""
+ custom_log = io.StringIO()
+ handler = logging.StreamHandler(custom_log)
+ handler.setLevel(logging.DEBUG)
+ exit_code, stdout, stderr = get_main_output(
+ [
+ "--validate",
+ get_data("tests/CometAdapter.cwl"),
+ ],
+ logger_handler=handler,
+ )
+ custom_log_text = custom_log.getvalue()
+ assert exit_code == 0
+ custom_log_text = re.sub(r"\s\s+", " ", custom_log_text)
+ stdout = re.sub(r"\s\s+", " ", stdout)
+ stderr = re.sub(r"\s\s+", " ", stderr)
+ assert "INFO" not in stdout
+ assert "INFO" not in stderr
+ assert "INFO" in custom_log_text
+ assert "WARNING" not in stdout
+ assert "WARNING" not in stderr
+ assert "WARNING" in custom_log_text
+ assert "tests/CometAdapter.cwl:9:3: object id" not in stdout
+ assert "tests/CometAdapter.cwl:9:3: object id" not in stderr
+ assert "tests/CometAdapter.cwl:9:3: object id" in custom_log_text
+ assert "tests/CometAdapter.cwl#out' previously defined" not in stdout
+ assert "tests/CometAdapter.cwl#out' previously defined" not in stderr
+ assert "tests/CometAdapter.cwl#out' previously defined" in custom_log_text
diff --git a/tests/util.py b/tests/util.py
index 44d2f108..8dd0bf74 100644
--- a/tests/util.py
+++ b/tests/util.py
@@ -11,7 +11,7 @@ import sys
from collections.abc import Generator, Mapping
from contextlib import ExitStack
from pathlib import Path
-from typing import Optional, Union
+from typing import Any, Optional, Union
import pytest
@@ -88,6 +88,7 @@ def get_main_output(
replacement_env: Optional[Mapping[str, str]] = None,
extra_env: Optional[Mapping[str, str]] = None,
monkeypatch: Optional[pytest.MonkeyPatch] = None,
+ **extra_kwargs: Any,
) -> tuple[Optional[int], str, str]:
"""Run cwltool main.
@@ -113,7 +114,7 @@ def get_main_output(
monkeypatch.setenv(k, v)
try:
- rc = main(argsl=args, stdout=stdout, stderr=stderr)
+ rc = main(argsl=args, stdout=stdout, stderr=stderr, **extra_kwargs)
except SystemExit as e:
if isinstance(e.code, int):
rc = e.code
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 3.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-retry"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libxml2-dev libxslt-dev nodejs"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argcomplete==3.6.1
CacheControl==0.14.2
certifi==2025.1.31
charset-normalizer==3.4.1
coloredlogs==15.0.1
coverage==7.8.0
cwl-upgrader==1.2.12
cwl-utils==0.37
-e git+https://github.com/common-workflow-language/cwltool.git@6c86caa0571fd186d90a6600e0bb405596d4a5e0#egg=cwltool
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
humanfriendly==10.0
idna==3.10
iniconfig==2.1.0
isodate==0.7.2
lxml==5.3.1
mistune==3.0.2
msgpack==1.1.0
mypy-extensions==1.0.0
networkx==3.2.1
packaging==24.2
pluggy==1.5.0
prov==1.5.1
psutil==7.0.0
pydot==2.0.0
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-retry==1.7.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
rdflib==7.1.4
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
schema-salad==8.8.20250205075315
six==1.17.0
spython==0.3.14
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: cwltool
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argcomplete==3.6.1
- cachecontrol==0.14.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coloredlogs==15.0.1
- coverage==7.8.0
- cwl-upgrader==1.2.12
- cwl-utils==0.37
- cwltool==3.1.20241112140731.dev2+g6c86caa0
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- humanfriendly==10.0
- idna==3.10
- iniconfig==2.1.0
- isodate==0.7.2
- lxml==5.3.1
- mistune==3.0.2
- msgpack==1.1.0
- mypy-extensions==1.0.0
- networkx==3.2.1
- packaging==24.2
- pluggy==1.5.0
- prov==1.5.1
- psutil==7.0.0
- pydot==2.0.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-retry==1.7.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- rdflib==7.1.4
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- schema-salad==8.8.20250205075315
- six==1.17.0
- spython==0.3.14
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/cwltool
| [
"tests/test_examples.py::test_validate_optional_src_with_mandatory_sink",
"tests/test_examples.py::test_invalid_nested_array",
"tests/test_validate.py::test_validate_with_invalid_input_object",
"tests/test_validate.py::test_validate_quiet"
] | [] | [
"tests/test_examples.py::test_expression_match[(foo",
"tests/test_examples.py::test_expression_match[(",
"tests/test_examples.py::test_expression_interpolate[-$(foo[\"b\\\\'ar\"].baz)--true]",
"tests/test_examples.py::test_expression_interpolate[-$(foo['b\\\\\"ar'].baz)--null]",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar)",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'])",
"tests/test_examples.py::test_expression_interpolate[$(foo[\"bar\"])",
"tests/test_examples.py::test_expression_match[(foo[bar].baz)-False]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['barz'])]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo[\"barz\"])]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo.bar.bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['bar'].bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['bar'][\"bazz\"])]",
"tests/test_examples.py::test_expression_match[(foo['bar\"].baz)-False]",
"tests/test_examples.py::test_expression_match[(foo['bar].baz)-False]",
"tests/test_examples.py::test_expression_match[(foo['bar']['baz'])-True]",
"tests/test_examples.py::test_expression_match[(foo['b\\\\'ar']['baz'])-True]",
"tests/test_examples.py::test_expression_interpolate[-$(foo.bar.baz)--zab1]",
"tests/test_examples.py::test_expression_interpolate[-$(foo['bar'].baz)--zab1]",
"tests/test_examples.py::test_expression_interpolate[-$(foo['bar'][\"baz\"])--zab1]",
"tests/test_examples.py::test_expression_interpolate[-$(foo.bar['baz'])--zab1]",
"tests/test_examples.py::test_expression_interpolate[-$(foo['b",
"tests/test_examples.py::test_typechecking[src_type13-sink_type13-merge_flattened-None-exception]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\$(foo.bar.baz)-\\\\$(foo.bar.baz)-1]",
"tests/test_examples.py::test_scandeps",
"tests/test_examples.py::test_scandeps_samedirname",
"tests/test_examples.py::test_scandeps_collision",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo.bar.bazz)]",
"tests/test_examples.py::test_typechecking[src_type7-sink_type7-merge_nested-None-pass]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo['b\\\\\"ar'].bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[$(lst[O])]",
"tests/test_examples.py::test_expression_interpolate_failures[$(lst[2])]",
"tests/test_examples.py::test_expression_interpolate_failures[$(lst.lengthz)]",
"tests/test_examples.py::test_expression_interpolate_failures[$(lst['lengthz'])]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\$foo-\\\\$foo-2]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\foo-\\\\foo-2]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\x-\\\\x-2]",
"tests/test_examples.py::test_expression_interpolate[$(lst[1])-B]",
"tests/test_examples.py::test_expression_interpolate[$(lst.length)-2]",
"tests/test_examples.py::test_expression_interpolate[$(lst['length'])-2]",
"tests/test_examples.py::test_expression_interpolate[-$(foo.bar)--{\"baz\":",
"tests/test_examples.py::test_expression_interpolate[-$(foo['bar'])--{\"baz\":",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\$(foo.bar.baz)-\\\\\\\\zab1-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\$foo-$foo-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\foo-foo-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\x-x-1]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo['bar'].bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo['bar'][\"bazz\"])]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo.bar['bazz'])]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo['b\\\\'ar'].bazz)]",
"tests/test_examples.py::test_typechecking[src_type14-sink_type14-merge_flattened-None-pass]",
"tests/test_examples.py::test_typechecking[src_type15-sink_type15-merge_flattened-None-warning]",
"tests/test_examples.py::test_typechecking[src_type16-sink_type16-merge_flattened-None-exception]",
"tests/test_examples.py::test_typechecking[src_type17-Any-merge_flattened-None-pass]",
"tests/test_examples.py::test_typechecking[src_type8-sink_type8-merge_nested-None-warning]",
"tests/test_examples.py::test_typechecking[src_type9-sink_type9-merge_nested-None-exception]",
"tests/test_examples.py::test_typechecking[src_type10-Any-merge_nested-None-pass]",
"tests/test_examples.py::test_typechecking[src_type11-sink_type11-merge_flattened-None-pass]",
"tests/test_examples.py::test_expression_match[(foo['b",
"tests/test_examples.py::test_expression_match[(foo_bar)-True]",
"tests/test_examples.py::test_expression_match[(foo.[\"bar\"])-False]",
"tests/test_examples.py::test_typechecking[src_type1-sink_type1-None-None-warning]",
"tests/test_examples.py::test_typechecking[src_type2-sink_type2-None-None-exception]",
"tests/test_examples.py::test_typechecking[src_type3-sink_type3-None-None-pass]",
"tests/test_examples.py::test_typechecking[src_type4-sink_type4-None-None-warning]",
"tests/test_examples.py::test_typechecking[src_type5-sink_type5-None-None-exception]",
"tests/test_examples.py::test_expression_match[(foo)-True]",
"tests/test_examples.py::test_expression_match[(foo.bar)-True]",
"tests/test_examples.py::test_trick_scandeps",
"tests/test_examples.py::test_scandeps_defaults_with_secondaryfiles",
"tests/test_examples.py::test_compare_types[0-source0-sink0-True]",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'].baz)",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'][\"baz\"])",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar['baz'])",
"tests/test_examples.py::test_expression_interpolate[$(foo['b",
"tests/test_examples.py::test_expression_interpolate[$(foo['b\\\\'ar'].baz)",
"tests/test_examples.py::test_expression_interpolate[$(foo['b\\\\\"ar'].baz)",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'][\"baz\"])-zab1]",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar['baz'])-zab1]",
"tests/test_examples.py::test_expression_interpolate[$(foo['b\\\\'ar'].baz)-True]",
"tests/test_examples.py::test_expression_interpolate[$(foo[\"b'ar\"].baz)-True]",
"tests/test_examples.py::test_expression_interpolate[$(foo['b\\\\\"ar'].baz)-None]",
"tests/test_examples.py::test_expression_interpolate[$(foo)-expected0]",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar)-expected1]",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'])-expected2]",
"tests/test_examples.py::test_expression_interpolate[$(foo[\"bar\"])-expected3]",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar.baz)-zab1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\x-\\\\x-2]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\x-\\\\\\\\x-2]",
"tests/test_examples.py::test_compare_types_strict[0-source0-sink0-True]",
"tests/test_examples.py::test_compare_types_strict[1-source1-sink1-True]",
"tests/test_examples.py::test_compare_types_strict[2-source2-sink2-False]",
"tests/test_examples.py::test_compare_types_strict[3-source3-sink3-True]",
"tests/test_examples.py::test_compare_types_strict[4-source4-sink4-False]",
"tests/test_examples.py::test_compare_types[1-source1-sink1-True]",
"tests/test_examples.py::test_compare_types[2-source2-sink2-True]",
"tests/test_examples.py::test_compare_types[3-source3-sink3-False]",
"tests/test_examples.py::test_compare_types[record",
"tests/test_examples.py::test_expression_match[(foo['bar'])-True]",
"tests/test_examples.py::test_expression_match[(foo[\"bar\"])-True]",
"tests/test_examples.py::test_expression_match[(foo.bar.baz)-True]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\x-\\\\x-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\x-\\\\\\\\x-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\$(foo.bar.baz)-$(foo.bar.baz)-2]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\$(foo.bar.baz)-\\\\zab1-2]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\$(foo.bar.baz)-\\\\$(foo.bar.baz)-2]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['b",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['b\\\\'ar'].bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo[\"b\\\\'ar\"].bazz)]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo['b\\\\\"ar'].bazz)]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\$(foo.bar.baz)-$(foo.bar.baz)-1]",
"tests/test_examples.py::test_expression_match[(foo.bar-False]",
"tests/test_examples.py::test_expression_match[foo.bar)-False]",
"tests/test_examples.py::test_expression_match[foo.b",
"tests/test_examples.py::test_expression_match[foo.b'ar)-False]",
"tests/test_examples.py::test_expression_match[(foo+bar-False]",
"tests/test_examples.py::test_typechecking[src_type19-sink_type19-merge_flattened-None-pass]",
"tests/test_examples.py::test_typechecking[src_type20-sink_type20-merge_flattened-special",
"tests/test_examples.py::test_typechecking[src_type0-sink_type0-None-None-pass]",
"tests/test_examples.py::test_typechecking[src_type12-sink_type12-merge_flattened-None-warning]",
"tests/test_examples.py::test_typechecking[src_type6-sink_type6-merge_nested-None-exception]",
"tests/test_examples.py::test_typechecking[src_type18-Any-merge_flattened-None-pass]",
"tests/test_examples.py::test_dedupe",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\x-\\\\\\\\x-2]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo['barz'])]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo.barz)]",
"tests/test_examples.py::test_factory_redefined_args",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\$(foo.bar.baz)-\\\\\\\\zab1-2]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo)--{\"bar\":{\"baz\":\"zab1\"},\"b",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\$(foo.bar.baz)-\\\\zab1-1]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\$(foo.bar.baz)-$(foo.bar.baz)-1]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo.barz)]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\$(foo.bar.baz)-\\\\zab1-1]",
"tests/test_examples.py::test_expression_interpolate_escapebehavior[\\\\\\\\x-\\\\x-1]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo.bar)--{\"baz\":\"zab1\"}]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo[\"b'ar\"].bazz)]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['bar'])--{\"baz\":\"zab1\"}]",
"tests/test_examples.py::test_expression_interpolate_failures[-$(foo.bar['bazz'])]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo[\"bar\"])--{\"baz\":\"zab1\"}]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\x-\\\\\\\\x-2]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\x-\\\\\\\\x-2]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\x-x-1]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\x-\\\\x-1]",
"tests/test_examples.py::test_expression_interpolate_failures[$(fooz)]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\$(foo.bar.baz)-\\\\$(foo.bar.baz)-2]",
"tests/test_examples.py::test_expression_interpolate[-$(foo['b\\\\'ar'].baz)--true]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\$(foo.bar.baz)-\\\\\\\\zab1-2]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['b\\\\\"ar'].baz)--null]",
"tests/test_examples.py::test_expression_interpolate[$(foo[\"b\\\\'ar\"].baz)",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['bar'].baz)--zab1]",
"tests/test_examples.py::test_expression_interpolate[$(foo.bar.baz)",
"tests/test_examples.py::test_parameter_to_expression[$(foo.bar)",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\x-\\\\x-1]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo.bar.baz)--zab1]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\$(foo.bar.baz)-\\\\$(foo.bar.baz)-1]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['bar'][\"baz\"])--zab1]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\$foo-\\\\$foo-2]",
"tests/test_examples.py::test_parameter_to_expression[$(foo['bar'])",
"tests/test_examples.py::test_expression_interpolate[-$(foo[\"bar\"])--{\"baz\":",
"tests/test_examples.py::test_factory_default_args",
"tests/test_examples.py::test_parameter_to_expression[-$(foo.bar['baz'])--zab1]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['b",
"tests/test_examples.py::test_parameter_to_expression[$(foo[\"b\\\\'ar\"].baz)",
"tests/test_examples.py::test_expression_interpolate[$(lst[0])-A]",
"tests/test_examples.py::test_v1_0_position_expression[--debug]",
"tests/test_examples.py::test_parameter_to_expression[$(foo['bar'][\"baz\"])",
"tests/test_examples.py::test_cache_default_literal_file[]",
"tests/test_examples.py::test_factory",
"tests/test_examples.py::test_factory_bad_outputs",
"tests/test_examples.py::test_lifting",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\$(foo.bar.baz)-\\\\\\\\zab1-1]",
"tests/test_examples.py::test_expression_match[(foo['bar'].baz)-True]",
"tests/test_examples.py::test_expression_interpolate[$(foo['bar'].baz)-zab1]",
"tests/test_examples.py::test_expression_match[(.foo[\"bar\"])-False]",
"tests/test_examples.py::test_expression_match[{foo}-False]",
"tests/test_examples.py::test_parameter_to_expression[$(foo['bar'].baz)",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\x-\\\\x-2]",
"tests/test_examples.py::test_v1_0_position_expression[]",
"tests/test_examples.py::test_var_spool_cwl_checker3",
"tests/test_examples.py::test_parameter_to_expression[$(foo.bar['baz'])",
"tests/test_examples.py::test_malformed_outputs",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\$(foo.bar.baz)-$(foo.bar.baz)-2]",
"tests/test_examples.py::test_parameter_to_expression[$(foo.bar.baz)",
"tests/test_examples.py::test_parameter_to_expression[$(foo['b\\\\'ar'].baz)",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\foo-\\\\foo-2]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\$(foo.bar.baz)-\\\\zab1-2]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\foo-foo-1]",
"tests/test_examples.py::test_bad_basecommand[]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\\\\\\\\\\\\\x-\\\\\\\\x-1]",
"tests/test_examples.py::test_expression_interpolate_failures[$(foo[\"barz\"])]",
"tests/test_examples.py::test_env_filtering[--debug]",
"tests/test_examples.py::test_parameter_to_expression[$(foo[\"bar\"])",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\x-\\\\x-2]",
"tests/test_examples.py::test_parameter_to_expression_interpolate_escapebehavior[\\\\$foo-$foo-1]",
"tests/test_examples.py::test_bad_stdout_expr_error",
"tests/test_examples.py::test_var_spool_cwl_checker1",
"tests/test_examples.py::test_bad_stderr_expr_error",
"tests/test_examples.py::test_write_summary",
"tests/test_examples.py::test_factory_partial_scatter",
"tests/test_examples.py::test_v1_0_position_expression[--parallel",
"tests/test_examples.py::test_env_filtering[]",
"tests/test_examples.py::test_v1_0_position_expression[--parallel]",
"tests/test_examples.py::test_record_default_with_long",
"tests/test_examples.py::test_cache_default_literal_file[--parallel]",
"tests/test_examples.py::test_operation_class",
"tests/test_examples.py::test_custom_type_in_step_process",
"tests/test_examples.py::test_command_line_tool_class",
"tests/test_examples.py::test_bad_basecommand[--parallel]",
"tests/test_examples.py::test_optional_numeric_output_0[]",
"tests/test_examples.py::tests_outputsource_valid_identifier_invalid_source",
"tests/test_examples.py::test_v1_0_arg_empty_prefix_separate_false",
"tests/test_examples.py::test_bad_basecommand[--parallel",
"tests/test_examples.py::test_expression_tool_class",
"tests/test_examples.py::test_bad_timelimit_expr",
"tests/test_examples.py::test_format_expr_error",
"tests/test_examples.py::test_very_small_and_large_floats",
"tests/test_validate.py::test_validate_with_valid_input_object",
"tests/test_examples.py::test_make_template",
"tests/test_examples.py::test_malformed_reqs",
"tests/test_examples.py::test_factory_partial_output",
"tests/test_examples.py::test_env_filtering[--parallel",
"tests/test_examples.py::test_var_spool_cwl_checker2",
"tests/test_examples.py::test_malformed_hints",
"tests/test_examples.py::test_static_checker",
"tests/test_examples.py::test_input_named_id",
"tests/test_validate.py::test_validate_no_warnings",
"tests/test_examples.py::test_mismatched_optional_arrays",
"tests/test_examples.py::test_res_req_expr_float_1_0",
"tests/test_examples.py::test_parameter_to_expression[$(foo['b",
"tests/test_examples.py::test_res_req_expr_float_1_2",
"tests/test_validate.py::test_validate_custom_logger",
"tests/test_examples.py::test_no_js_console[]",
"tests/test_examples.py::test_glob_expr_error",
"tests/test_examples.py::test_scatter_output_filenames",
"tests/test_examples.py::test_bad_networkaccess_expr",
"tests/test_examples.py::test_format_expr_error2",
"tests/test_examples.py::test_bad_stdin_expr_error",
"tests/test_examples.py::test_parameter_to_expression[$(foo['b\\\\\"ar'].baz)",
"tests/test_examples.py::test_record_outputeval",
"tests/test_examples.py::test_cache_default_literal_file[--parallel",
"tests/test_examples.py::test_separate_without_prefix",
"tests/test_examples.py::test_bad_basecommand[--debug]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo['b\\\\'ar'].baz)--true]",
"tests/test_examples.py::test_parameter_to_expression[-$(foo[\"b\\\\'ar\"].baz)--true]",
"tests/test_examples.py::test_no_js_console[--debug]",
"tests/test_examples.py::test_v1_1_position_badexpression[]",
"tests/test_examples.py::test_optional_numeric_output_0[--parallel]",
"tests/test_examples.py::test_staging_files_in_any",
"tests/test_examples.py::test_optional_numeric_output_0[--parallel",
"tests/test_examples.py::test_v1_1_position_badexpression[--parallel",
"tests/test_examples.py::test_v1_1_position_badexpression[--parallel]",
"tests/test_examples.py::test_js_console_cmd_line_tool[--debug]",
"tests/test_examples.py::test_print_dot",
"tests/test_examples.py::test_cache_default_literal_file[--debug]",
"tests/test_examples.py::test_optional_numeric_output_0[--debug]",
"tests/test_examples.py::test_js_console_cmd_line_tool[--parallel]",
"tests/test_validate.py::test_validate_graph_with_no_default",
"tests/test_examples.py::test_env_filtering[--parallel]",
"tests/test_examples.py::test_no_js_console[--parallel",
"tests/test_examples.py::test_no_js_console[--parallel]",
"tests/test_examples.py::test_js_console_cmd_line_tool[--parallel",
"tests/test_examples.py::test_js_console_cmd_line_tool[]",
"tests/test_examples.py::test_v1_1_position_badexpression[--debug]"
] | [] | Apache License 2.0 | 20,438 | 902 | [
"cwltool/loghandler.py",
"cwltool/main.py"
] |
tobymao__sqlglot-4488 | 3db54b137484b0496d9cbda003f5c747f5965e64 | 2024-12-09 14:05:46 | ab108518c53173ddf71ac1dfd9e45df6ac621b81 | diff --git a/sqlglot/dialects/duckdb.py b/sqlglot/dialects/duckdb.py
index f3ef144a..946410f5 100644
--- a/sqlglot/dialects/duckdb.py
+++ b/sqlglot/dialects/duckdb.py
@@ -1021,3 +1021,12 @@ class DuckDB(Dialect):
return self.func(
"REGEXP_EXTRACT", expression.this, expression.expression, group, params
)
+
+ @unsupported_args("culture")
+ def numbertostr_sql(self, expression: exp.NumberToStr) -> str:
+ fmt = expression.args.get("format")
+ if fmt and fmt.is_int:
+ return self.func("FORMAT", f"'{{:,.{fmt.name}f}}'", expression.this)
+
+ self.unsupported("Only integer formats are supported by NumberToStr")
+ return self.function_fallback_sql(expression)
diff --git a/sqlglot/dialects/mysql.py b/sqlglot/dialects/mysql.py
index 8cadd4ba..4dd35180 100644
--- a/sqlglot/dialects/mysql.py
+++ b/sqlglot/dialects/mysql.py
@@ -307,6 +307,7 @@ class MySQL(Dialect):
"DAYOFMONTH": lambda args: exp.DayOfMonth(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
"DAYOFWEEK": lambda args: exp.DayOfWeek(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
"DAYOFYEAR": lambda args: exp.DayOfYear(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
+ "FORMAT": exp.NumberToStr.from_arg_list,
"FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"),
"ISNULL": isnull_to_is_null,
"LOCATE": locate_to_strposition,
@@ -735,6 +736,7 @@ class MySQL(Dialect):
exp.Month: _remove_ts_or_ds_to_date(),
exp.NullSafeEQ: lambda self, e: self.binary(e, "<=>"),
exp.NullSafeNEQ: lambda self, e: f"NOT {self.binary(e, '<=>')}",
+ exp.NumberToStr: rename_func("FORMAT"),
exp.Pivot: no_pivot_sql,
exp.Select: transforms.preprocess(
[
| MySQL's numeric `format` function is not properly translated to DuckDB
https://dev.mysql.com/doc/refman/8.4/en/string-functions.html#function_format
```sh
$ # sqlglot-25.32.0
$ python3
Python 3.9.6 (default, Feb 3 2024, 15:58:27)
[Clang 15.0.0 (clang-1500.3.9.4)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import sqlglot
>>> sqlglot.transpile("SELECT format(123456.789, 3)", read="mysql", write="duckdb")[0]
'SELECT FORMAT(123456.789, 3)'
>>>
```
It appears that this function could be translated to duckdb's numeric `format` function: https://duckdb.org/docs/sql/functions/char#print-numbers-with-thousand-separators | tobymao/sqlglot | diff --git a/tests/dialects/test_mysql.py b/tests/dialects/test_mysql.py
index 18cd3740..465c2317 100644
--- a/tests/dialects/test_mysql.py
+++ b/tests/dialects/test_mysql.py
@@ -1,7 +1,7 @@
import unittest
import sys
-from sqlglot import expressions as exp
+from sqlglot import UnsupportedError, expressions as exp
from sqlglot.dialects.mysql import MySQL
from tests.dialects.test_dialect import Validator
@@ -1344,3 +1344,33 @@ COMMENT='客户账户表'"""
for format in ("JSON", "TRADITIONAL", "TREE"):
self.validate_identity(f"DESCRIBE FORMAT={format} UPDATE test SET test_col = 'abc'")
+
+ def test_number_format(self):
+ self.validate_all(
+ "SELECT FORMAT(12332.123456, 4)",
+ write={
+ "duckdb": "SELECT FORMAT('{:,.4f}', 12332.123456)",
+ "mysql": "SELECT FORMAT(12332.123456, 4)",
+ },
+ )
+ self.validate_all(
+ "SELECT FORMAT(12332.1, 4)",
+ write={
+ "duckdb": "SELECT FORMAT('{:,.4f}', 12332.1)",
+ "mysql": "SELECT FORMAT(12332.1, 4)",
+ },
+ )
+ self.validate_all(
+ "SELECT FORMAT(12332.2, 0)",
+ write={
+ "duckdb": "SELECT FORMAT('{:,.0f}', 12332.2)",
+ "mysql": "SELECT FORMAT(12332.2, 0)",
+ },
+ )
+ self.validate_all(
+ "SELECT FORMAT(12332.2, 2, 'de_DE')",
+ write={
+ "duckdb": UnsupportedError,
+ "mysql": "SELECT FORMAT(12332.2, 2, 'de_DE')",
+ },
+ )
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 25.34 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@3db54b137484b0496d9cbda003f5c747f5965e64#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_mysql.py::TestMySQL::test_number_format"
] | [] | [
"tests/dialects/test_mysql.py::TestMySQL::test_at_time_zone",
"tests/dialects/test_mysql.py::TestMySQL::test_bits_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_canonical_functions",
"tests/dialects/test_mysql.py::TestMySQL::test_convert",
"tests/dialects/test_mysql.py::TestMySQL::test_date_format",
"tests/dialects/test_mysql.py::TestMySQL::test_ddl",
"tests/dialects/test_mysql.py::TestMySQL::test_escape",
"tests/dialects/test_mysql.py::TestMySQL::test_explain",
"tests/dialects/test_mysql.py::TestMySQL::test_grant",
"tests/dialects/test_mysql.py::TestMySQL::test_hexadecimal_literal",
"tests/dialects/test_mysql.py::TestMySQL::test_identity",
"tests/dialects/test_mysql.py::TestMySQL::test_introducers",
"tests/dialects/test_mysql.py::TestMySQL::test_is_null",
"tests/dialects/test_mysql.py::TestMySQL::test_json_object",
"tests/dialects/test_mysql.py::TestMySQL::test_json_value",
"tests/dialects/test_mysql.py::TestMySQL::test_match_against",
"tests/dialects/test_mysql.py::TestMySQL::test_monthname",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql",
"tests/dialects/test_mysql.py::TestMySQL::test_mysql_time",
"tests/dialects/test_mysql.py::TestMySQL::test_safe_div",
"tests/dialects/test_mysql.py::TestMySQL::test_set_variable",
"tests/dialects/test_mysql.py::TestMySQL::test_show_columns",
"tests/dialects/test_mysql.py::TestMySQL::test_show_db_like_or_where_sql",
"tests/dialects/test_mysql.py::TestMySQL::test_show_engine",
"tests/dialects/test_mysql.py::TestMySQL::test_show_errors",
"tests/dialects/test_mysql.py::TestMySQL::test_show_events",
"tests/dialects/test_mysql.py::TestMySQL::test_show_grants",
"tests/dialects/test_mysql.py::TestMySQL::test_show_index",
"tests/dialects/test_mysql.py::TestMySQL::test_show_like_or_where",
"tests/dialects/test_mysql.py::TestMySQL::test_show_name",
"tests/dialects/test_mysql.py::TestMySQL::test_show_processlist",
"tests/dialects/test_mysql.py::TestMySQL::test_show_profile",
"tests/dialects/test_mysql.py::TestMySQL::test_show_replica_status",
"tests/dialects/test_mysql.py::TestMySQL::test_show_simple",
"tests/dialects/test_mysql.py::TestMySQL::test_show_tables",
"tests/dialects/test_mysql.py::TestMySQL::test_string_literals",
"tests/dialects/test_mysql.py::TestMySQL::test_timestamp_trunc",
"tests/dialects/test_mysql.py::TestMySQL::test_types"
] | [] | MIT License | 20,439 | 549 | [
"sqlglot/dialects/duckdb.py",
"sqlglot/dialects/mysql.py"
] |
|
tefra__xsdata-1107 | 474b072cd05c2f7368ad1680aff7515b9d009f0a | 2024-12-09 15:30:35 | 474b072cd05c2f7368ad1680aff7515b9d009f0a | sonarcloud[bot]: ## [](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1107) **Quality Gate passed**
Issues
 [0 New issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1107&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0 Accepted issues](https://sonarcloud.io/project/issues?id=tefra_xsdata&pullRequest=1107&issueStatuses=ACCEPTED)
Measures
 [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=tefra_xsdata&pullRequest=1107&issueStatuses=OPEN,CONFIRMED&sinceLeakPeriod=true)
 [0.0% Coverage on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1107&metric=new_coverage&view=list)
 [0.0% Duplication on New Code](https://sonarcloud.io/component_measures?id=tefra_xsdata&pullRequest=1107&metric=new_duplicated_lines_density&view=list)
[See analysis details on SonarQube Cloud](https://sonarcloud.io/dashboard?id=tefra_xsdata&pullRequest=1107)
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/tefra/xsdata/pull/1107?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 99.95%. Comparing base [(`474b072`)](https://app.codecov.io/gh/tefra/xsdata/commit/474b072cd05c2f7368ad1680aff7515b9d009f0a?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou) to head [(`16b9c7f`)](https://app.codecov.io/gh/tefra/xsdata/commit/16b9c7fd04af9cc436984ab14c73afaaf3768f81?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #1107 +/- ##
===========================================
- Coverage 100.00% 99.95% -0.05%
===========================================
Files 115 115
Lines 9299 9299
Branches 1417 1417
===========================================
- Hits 9299 9295 -4
- Misses 0 3 +3
- Partials 0 1 +1
```
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/tefra/xsdata/pull/1107?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Chris+Tsou).
| diff --git a/xsdata/codegen/handlers/process_attributes_types.py b/xsdata/codegen/handlers/process_attributes_types.py
index adba61e1..2475be57 100644
--- a/xsdata/codegen/handlers/process_attributes_types.py
+++ b/xsdata/codegen/handlers/process_attributes_types.py
@@ -113,16 +113,19 @@ class ProcessAttributeTypes(RelativeHandlerInterface):
if attr.restrictions.pattern:
cls.reset_attribute_type(attr_type)
- def find_dependency(self, attr_type: AttrType, tag: str) -> Optional[Class]:
+ def find_dependency(
+ self, target: Class, attr_type: AttrType, tag: str
+ ) -> Optional[Class]:
"""Find the source type from the attr type and tag.
Avoid conflicts by selecting any matching type by qname and preferably:
- 1. Match the candidate object tag
- 2. Match element again complexType
+ 1. Match element again complexType with no self reference
+ 2. Match the candidate object tag
3. Match non element and complexType
4. Anything
Args:
+ target: The target class instance
attr_type: The attr type instance
tag: The xml tag name, e.g. Element, Attribute, ComplexType
@@ -130,8 +133,10 @@ class ProcessAttributeTypes(RelativeHandlerInterface):
The source class or None if no match is found
"""
conditions = (
+ lambda obj: tag == Tag.ELEMENT
+ and obj.tag == Tag.COMPLEX_TYPE
+ and obj is not target,
lambda obj: obj.tag == tag,
- lambda obj: tag == Tag.ELEMENT and obj.tag == Tag.COMPLEX_TYPE,
lambda obj: not obj.is_complex_type,
lambda x: True,
)
@@ -184,7 +189,7 @@ class ProcessAttributeTypes(RelativeHandlerInterface):
attr: The attr instance
attr_type: The attr type instance
"""
- source = self.find_dependency(attr_type, attr.tag)
+ source = self.find_dependency(target, attr_type, attr.tag)
if not source:
logger.warning("Reset absent type: %s", attr_type.name)
self.reset_attribute_type(attr_type)
| xsdata creates two classes for the same type
I am not sure what is going on, but there seems to be two dataclasses defined from the same type. The same problem shows up in different schemas from here: http://www.accellera.org/XMLSchema
Specifically the problematic type is portAccessType defined in:
http://www.accellera.org/XMLSchema/SPIRIT/1.5/port.xsd
http://www.accellera.org/XMLSchema/SPIRIT/1685-2009/port.xsd
http://www.accellera.org/XMLSchema/IPXACT/1685-2014/port.xsd
http://www.accellera.org/XMLSchema/IPXACT/1685-2022/port.xsd
They all show the same issue.
I see two python files (`port_access_type.py` and `port_access_type_1.py`) generated with classes `PortAccessType` and `PortAccessType1` respectively. Both of these have:
```python
class Meta:
name = "portAccessType"
```
Here are the two files:
***port_access_type.py***
```python
from dataclasses import dataclass, field
from typing import Optional
from org.accellera.ipxact.v1685_2014.simple_port_access_type import (
SimplePortAccessType,
)
__NAMESPACE__ = "http://www.accellera.org/XMLSchema/IPXACT/1685-2014"
@dataclass(slots=True)
class PortAccessType:
"""Indicates how a netlister accesses a port.
'ref' means accessed by reference (default) and 'ptr' means accessed
by pointer.
"""
class Meta:
name = "portAccessType"
namespace = "http://www.accellera.org/XMLSchema/IPXACT/1685-2014"
value: Optional[SimplePortAccessType] = field(
default=None,
metadata={
"required": True,
},
)
```
***port_access_type1.py***
```python
from collections.abc import Iterable
from dataclasses import dataclass, field
from typing import Optional
from org.accellera.ipxact.v1685_2014.leaf_access_handle import LeafAccessHandle
from org.accellera.ipxact.v1685_2014.port_access_type import PortAccessType
__NAMESPACE__ = "http://www.accellera.org/XMLSchema/IPXACT/1685-2014"
@dataclass(slots=True)
class PortAccessType1:
"""
:ivar port_access_type: Indicates how a netlister accesses a port.
'ref' means accessed by reference (default) and 'ptr' means
accessed through a pointer.
:ivar access_handles:
"""
class Meta:
name = "portAccessType"
port_access_type: Optional[PortAccessType] = field(
default=None,
metadata={
"name": "portAccessType",
"type": "Element",
"namespace": "http://www.accellera.org/XMLSchema/IPXACT/1685-2014",
},
)
access_handles: Optional["PortAccessType1.AccessHandles"] = field(
default=None,
metadata={
"name": "accessHandles",
"type": "Element",
"namespace": "http://www.accellera.org/XMLSchema/IPXACT/1685-2014",
},
)
@dataclass(slots=True)
class AccessHandles:
access_handle: Iterable[LeafAccessHandle] = field(
default_factory=list,
metadata={
"name": "accessHandle",
"type": "Element",
"namespace": "http://www.accellera.org/XMLSchema/IPXACT/1685-2014",
"min_occurs": 1,
},
)
```
I see a number of duplicate files with `*1.py`, `*2.py` with the same class Meta names!
http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/
http://www.accellera.org/XMLSchema
Here are the commands I use to generate the binding classes.
```bash
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1_5 http://www.accellera.org/XMLSchema/SPIRIT/1.5/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009 http://www.accellera.org/XMLSchema/SPIRIT/1685-2009/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2014 http://www.accellera.org/XMLSchema/SPIRIT/1685-2014/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2022 http://www.accellera.org/XMLSchema/SPIRIT/1685-2022/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009.ve http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009.ve.ams http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/AMS/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009.ve.core http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/CORE/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009.ve.pdp http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/PDP/index.xsd
$ xsdata generate --slots --subscriptable-types --generic-collections --relative-imports --structure-style filenames --package org.accellera.spirit.v1685_2009.ve.power http://www.accellera.org/XMLSchema/SPIRIT/1685-2009-VE-1.0/POWER/index.xsd
```
I am not sure if the problem is with the XSDs or a bug in xsdata. | tefra/xsdata | diff --git a/tests/codegen/handlers/test_process_attributes_types.py b/tests/codegen/handlers/test_process_attributes_types.py
index b7b96178..e207f271 100644
--- a/tests/codegen/handlers/test_process_attributes_types.py
+++ b/tests/codegen/handlers/test_process_attributes_types.py
@@ -344,25 +344,28 @@ class ProcessAttributeTypesTests(FactoryTestCase):
complex_type = ClassFactory.create(qname="a", tag=Tag.COMPLEX_TYPE)
simple_type = ClassFactory.create(qname="a", tag=Tag.SIMPLE_TYPE)
- actual = self.processor.find_dependency(attr_type, Tag.ELEMENT)
+ actual = self.processor.find_dependency(element, attr_type, Tag.ELEMENT)
self.assertIsNone(actual)
self.processor.container.add(simple_type)
- actual = self.processor.find_dependency(attr_type, Tag.ELEMENT)
+ actual = self.processor.find_dependency(element, attr_type, Tag.ELEMENT)
self.assertEqual(simple_type, actual)
self.processor.container.add(complex_type)
- actual = self.processor.find_dependency(attr_type, Tag.ELEMENT)
+ actual = self.processor.find_dependency(element, attr_type, Tag.ELEMENT)
self.assertEqual(complex_type, actual)
self.processor.container.add(element)
- actual = self.processor.find_dependency(attr_type, Tag.ELEMENT)
+ actual = self.processor.find_dependency(complex_type, attr_type, Tag.ELEMENT)
self.assertEqual(element, actual)
- actual = self.processor.find_dependency(attr_type, Tag.SIMPLE_TYPE)
+ actual = self.processor.find_dependency(element, attr_type, Tag.ELEMENT)
+ self.assertEqual(complex_type, actual)
+
+ actual = self.processor.find_dependency(element, attr_type, Tag.SIMPLE_TYPE)
self.assertEqual(simple_type, actual)
- actual = self.processor.find_dependency(attr_type, Tag.EXTENSION)
+ actual = self.processor.find_dependency(element, attr_type, Tag.EXTENSION)
self.assertEqual(simple_type, actual)
def test_update_restrictions(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 24.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,lxml,soap]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-benchmark",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-default-group==1.2.4
coverage==7.8.0
docformatter==1.7.5
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
py-cpuinfo==9.0.0
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-cov==6.0.0
requests==2.32.3
ruff==0.11.2
tomli==2.2.1
toposort==1.10
typing_extensions==4.13.0
untokenize==0.1.1
urllib3==2.3.0
-e git+https://github.com/tefra/xsdata.git@474b072cd05c2f7368ad1680aff7515b9d009f0a#egg=xsdata
| name: xsdata
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-default-group==1.2.4
- coverage==7.8.0
- docformatter==1.7.5
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- requests==2.32.3
- ruff==0.11.2
- tomli==2.2.1
- toposort==1.10
- typing-extensions==4.13.0
- untokenize==0.1.1
- urllib3==2.3.0
- xsdata==24.11
prefix: /opt/conda/envs/xsdata
| [
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_find_dependency"
] | [] | [
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_cascade_properties",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_copy_attribute_properties",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_copy_attribute_properties_from_empty_source",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_copy_attribute_properties_from_nillable_source",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_copy_attribute_properties_set_default_value_if_none",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_detect_lazy_namespace",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_dependency_type_with_absent_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_dependency_type_with_abstract_type_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_dependency_type_with_complex_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_dependency_type_with_enumeration_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_dependency_type_with_simple_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_inner_type_with_circular_reference",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_inner_type_with_complex_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_inner_type_with_simple_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_native_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_type_with_dependency_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_type_with_forward_reference",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_type_with_native_type",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_types",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_process_types_with_ignore_patterns",
"tests/codegen/handlers/test_process_attributes_types.py::ProcessAttributeTypesTests::test_update_restrictions"
] | [] | MIT License | 20,441 | 529 | [
"xsdata/codegen/handlers/process_attributes_types.py"
] |
narwhals-dev__narwhals-1550 | d788eece6f1bee9e21d3840e1ec9e25799504331 | 2024-12-10 06:50:10 | a8fb9a2159a0072280e53b9f5726f8d36b7793ba | diff --git a/narwhals/dependencies.py b/narwhals/dependencies.py
index 463a64a6..61937f08 100644
--- a/narwhals/dependencies.py
+++ b/narwhals/dependencies.py
@@ -24,6 +24,9 @@ if TYPE_CHECKING:
import pyarrow as pa
import pyspark.sql as pyspark_sql
+ from narwhals.dataframe import DataFrame
+ from narwhals.dataframe import LazyFrame
+ from narwhals.series import Series
from narwhals.typing import IntoSeries
# We silently allow these but - given that they claim
@@ -320,6 +323,42 @@ def is_into_dataframe(native_dataframe: Any) -> bool:
)
+def is_narwhals_dataframe(df: Any) -> TypeGuard[DataFrame[Any]]:
+ """Check whether `df` is a Narwhals DataFrame.
+
+ This is useful if you expect a user to pass in a Narwhals
+ DataFrame directly, and you want to catch both ``narwhals.DataFrame``
+ and ``narwhals.stable.v1.DataFrame`.
+ """
+ from narwhals.dataframe import DataFrame
+
+ return isinstance(df, DataFrame)
+
+
+def is_narwhals_lazyframe(lf: Any) -> TypeGuard[LazyFrame[Any]]:
+ """Check whether `lf` is a Narwhals LazyFrame.
+
+ This is useful if you expect a user to pass in a Narwhals
+ LazyFrame directly, and you want to catch both ``narwhals.LazyFrame``
+ and ``narwhals.stable.v1.LazyFrame`.
+ """
+ from narwhals.dataframe import LazyFrame
+
+ return isinstance(lf, LazyFrame)
+
+
+def is_narwhals_series(ser: Any) -> TypeGuard[Series[Any]]:
+ """Check whether `ser` is a Narwhals Series.
+
+ This is useful if you expect a user to pass in a Narwhals
+ Series directly, and you want to catch both ``narwhals.Series``
+ and ``narwhals.stable.v1.Series`.
+ """
+ from narwhals.series import Series
+
+ return isinstance(ser, Series)
+
+
__all__ = [
"get_cudf",
"get_ibis",
@@ -336,6 +375,9 @@ __all__ = [
"is_into_series",
"is_modin_dataframe",
"is_modin_series",
+ "is_narwhals_dataframe",
+ "is_narwhals_lazyframe",
+ "is_narwhals_series",
"is_numpy_array",
"is_pandas_dataframe",
"is_pandas_index",
diff --git a/narwhals/stable/v1/dependencies.py b/narwhals/stable/v1/dependencies.py
index 6a020622..8ad1ad20 100644
--- a/narwhals/stable/v1/dependencies.py
+++ b/narwhals/stable/v1/dependencies.py
@@ -15,6 +15,9 @@ from narwhals.dependencies import is_into_dataframe
from narwhals.dependencies import is_into_series
from narwhals.dependencies import is_modin_dataframe
from narwhals.dependencies import is_modin_series
+from narwhals.dependencies import is_narwhals_dataframe
+from narwhals.dependencies import is_narwhals_lazyframe
+from narwhals.dependencies import is_narwhals_series
from narwhals.dependencies import is_numpy_array
from narwhals.dependencies import is_pandas_dataframe
from narwhals.dependencies import is_pandas_index
@@ -43,6 +46,9 @@ __all__ = [
"is_into_series",
"is_modin_dataframe",
"is_modin_series",
+ "is_narwhals_dataframe",
+ "is_narwhals_lazyframe",
+ "is_narwhals_series",
"is_numpy_array",
"is_pandas_dataframe",
"is_pandas_index",
| enh: `nw.dependencies.is_narwhals_dataframe` / `nw.dependencies.is_narwhals_lazyframe` / `nw.dependencies.is_narwhals_series`
We have functions such as `nw.dependencies.is_pandas_dataframe`
We should add the three mentioned here:
- `nw.dependencies.is_narwhals_dataframe`
- `nw.dependencies.is_narwhals_lazyframe`
- `nw.dependencies.is_narwhals_series`
Use-case: the second bullet point raised here: https://github.com/matthewwardrop/formulaic/pull/226#issuecomment-2528019080
We can't expect users to remember to import narwhals (as opposed to the stable version) just for `isinstance` checks. `nw.dependencies.is_narwhals_dataframe` should handle both the `nw.DataFrame` and the `nw.stable.v1.DataFrame` cases | narwhals-dev/narwhals | diff --git a/tests/dependencies/is_narwhals_dataframe_test.py b/tests/dependencies/is_narwhals_dataframe_test.py
new file mode 100644
index 00000000..0a25bc8b
--- /dev/null
+++ b/tests/dependencies/is_narwhals_dataframe_test.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_dataframe
+
+if TYPE_CHECKING:
+ from tests.utils import ConstructorEager
+
+
+def test_is_narwhals_dataframe(constructor_eager: ConstructorEager) -> None:
+ df = constructor_eager({"col1": [1, 2], "col2": [3, 4]})
+
+ assert is_narwhals_dataframe(nw.from_native(df))
+ assert is_narwhals_dataframe(nws.from_native(df))
+ assert not is_narwhals_dataframe(df)
diff --git a/tests/dependencies/is_narwhals_lazyframe_test.py b/tests/dependencies/is_narwhals_lazyframe_test.py
new file mode 100644
index 00000000..1dcbe5fc
--- /dev/null
+++ b/tests/dependencies/is_narwhals_lazyframe_test.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_lazyframe
+from tests.utils import Constructor
+
+if TYPE_CHECKING:
+ from tests.utils import Constructor
+
+
+def test_is_narwhals_lazyframe(constructor: Constructor) -> None:
+ lf = constructor({"a": [1, 2, 3]})
+
+ assert is_narwhals_lazyframe(nw.from_native(lf).lazy())
+ assert is_narwhals_lazyframe(nws.from_native(lf).lazy())
+ assert not is_narwhals_lazyframe(lf)
diff --git a/tests/dependencies/is_narwhals_series_test.py b/tests/dependencies/is_narwhals_series_test.py
new file mode 100644
index 00000000..0beb3fc1
--- /dev/null
+++ b/tests/dependencies/is_narwhals_series_test.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import narwhals as nw
+import narwhals.stable.v1 as nws
+from narwhals.stable.v1.dependencies import is_narwhals_series
+
+if TYPE_CHECKING:
+ from tests.utils import ConstructorEager
+
+
+def test_is_narwhals_series(constructor_eager: ConstructorEager) -> None:
+ df = constructor_eager({"col1": [1, 2], "col2": [3, 4]})
+
+ assert is_narwhals_series(nw.from_native(df, eager_only=True)["col1"])
+ assert is_narwhals_series(nws.from_native(df, eager_only=True)["col1"])
+ assert not is_narwhals_series(nw.from_native(df, eager_only=True)["col1"].to_native())
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 1.16 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cfgv==3.4.0
click==8.1.8
cloudpickle==3.1.1
covdefaults==2.3.0
coverage==7.8.0
dask==2024.8.0
dask-expr==1.1.10
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
filelock==3.18.0
fsspec==2025.3.1
hypothesis==6.130.5
identify==2.6.9
importlib_metadata==8.6.1
iniconfig==2.1.0
joblib==1.4.2
locket==1.0.0
-e git+https://github.com/narwhals-dev/narwhals.git@d788eece6f1bee9e21d3840e1ec9e25799504331#egg=narwhals
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
partd==1.4.2
platformdirs==4.3.7
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
py4j==0.10.9.7
pyarrow==19.0.1
pyarrow-stubs==17.19
pyspark==3.5.5
pytest==8.3.5
pytest-cov==6.0.0
pytest-env==1.1.5
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
sortedcontainers==2.4.0
threadpoolctl==3.6.0
tomli==2.2.1
toolz==1.0.0
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
zipp==3.21.0
| name: narwhals
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cfgv==3.4.0
- click==8.1.8
- cloudpickle==3.1.1
- covdefaults==2.3.0
- coverage==7.8.0
- dask==2024.8.0
- dask-expr==1.1.10
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- filelock==3.18.0
- fsspec==2025.3.1
- hypothesis==6.130.5
- identify==2.6.9
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- joblib==1.4.2
- locket==1.0.0
- narwhals==1.16.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- partd==1.4.2
- platformdirs==4.3.7
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- py4j==0.10.9.7
- pyarrow==19.0.1
- pyarrow-stubs==17.19
- pyspark==3.5.5
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-env==1.1.5
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- sortedcontainers==2.4.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- toolz==1.0.0
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/narwhals
| [
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pyarrow_table_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[pandas_constructor]",
"tests/dependencies/is_narwhals_dataframe_test.py::test_is_narwhals_dataframe[polars_eager_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[dask_lazy_p2_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[polars_lazy_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pyarrow_table_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[polars_eager_constructor]",
"tests/dependencies/is_narwhals_lazyframe_test.py::test_is_narwhals_lazyframe[pandas_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_pyarrow_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_nullable_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pyarrow_table_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[pandas_constructor]",
"tests/dependencies/is_narwhals_series_test.py::test_is_narwhals_series[polars_eager_constructor]"
] | [] | [] | [] | MIT License | 20,448 | 931 | [
"narwhals/dependencies.py",
"narwhals/stable/v1/dependencies.py"
] |
|
tobymao__sqlglot-4493 | 2655d7c11d677cf47f33ac62fbfb86f4117ffd75 | 2024-12-10 09:06:43 | ab108518c53173ddf71ac1dfd9e45df6ac621b81 | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index e0d392b0..aaa1e376 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -107,6 +107,13 @@ def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
return _builder
+def _build_bitor(args: t.List) -> exp.BitwiseOr | exp.Anonymous:
+ if len(args) == 3:
+ return exp.Anonymous(this="BITOR", expressions=args)
+
+ return binary_from_function(exp.BitwiseOr)(args)
+
+
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _build_if_from_div0(args: t.List) -> exp.If:
lhs = exp._wrap(seq_get(args, 0), exp.Binary)
@@ -393,6 +400,8 @@ class Snowflake(Dialect):
),
"BITXOR": binary_from_function(exp.BitwiseXor),
"BIT_XOR": binary_from_function(exp.BitwiseXor),
+ "BITOR": _build_bitor,
+ "BIT_OR": _build_bitor,
"BOOLXOR": binary_from_function(exp.Xor),
"DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
"DATE_TRUNC": _date_trunc_to_time,
@@ -790,6 +799,14 @@ class Snowflake(Dialect):
return this
+ def _parse_foreign_key(self) -> exp.ForeignKey:
+ # inlineFK, the REFERENCES columns are implied
+ if self._match(TokenType.REFERENCES, advance=False):
+ return self.expression(exp.ForeignKey)
+
+ # outoflineFK, explicitly names the columns
+ return super()._parse_foreign_key()
+
class Tokenizer(tokens.Tokenizer):
STRING_ESCAPES = ["\\", "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
@@ -869,6 +886,7 @@ class Snowflake(Dialect):
"CONVERT_TIMEZONE", e.args.get("zone"), e.this
),
exp.BitwiseXor: rename_func("BITXOR"),
+ exp.BitwiseOr: rename_func("BITOR"),
exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
exp.DateAdd: date_delta_sql("DATEADD"),
exp.DateDiff: date_delta_sql("DATEDIFF"),
diff --git a/sqlglot/expressions.py b/sqlglot/expressions.py
index cce3a17a..b804212f 100644
--- a/sqlglot/expressions.py
+++ b/sqlglot/expressions.py
@@ -2113,7 +2113,7 @@ class Directory(Expression):
class ForeignKey(Expression):
arg_types = {
- "expressions": True,
+ "expressions": False,
"reference": False,
"delete": False,
"update": False,
diff --git a/sqlglot/generator.py b/sqlglot/generator.py
index 9aee03c1..94fe9170 100644
--- a/sqlglot/generator.py
+++ b/sqlglot/generator.py
@@ -2829,13 +2829,14 @@ class Generator(metaclass=_Generator):
def foreignkey_sql(self, expression: exp.ForeignKey) -> str:
expressions = self.expressions(expression, flat=True)
+ expressions = f" ({expressions})" if expressions else ""
reference = self.sql(expression, "reference")
reference = f" {reference}" if reference else ""
delete = self.sql(expression, "delete")
delete = f" ON DELETE {delete}" if delete else ""
update = self.sql(expression, "update")
update = f" ON UPDATE {update}" if update else ""
- return f"FOREIGN KEY ({expressions}){reference}{delete}{update}"
+ return f"FOREIGN KEY{expressions}{reference}{delete}{update}"
def primarykey_sql(self, expression: exp.ForeignKey) -> str:
expressions = self.expressions(expression, flat=True)
| Parsing Fails on Snowflake Foreign Keys Without Explicit Names
When parsing a Snowflake `CREATE TABLE` statement that creates an inline foreign key constraint with no explicit name, `sqlglot` fails with the following error:
```
sqlglot.errors.ParseError: Expecting (. Line 4, Col: 48.
```
It seems that it is expecting an explicit column name for the foreign key constraint.
From what I can tell from [Snowflake's documentation](https://docs.snowflake.com/en/sql-reference/sql/create-table-constraint#:~:text=do%20not%20need%20to%20be%20specified%20if%20the%20signature), this is valid syntax:
> When defining foreign keys, either inline or out-of-line, column name(s) for the referenced table do not need to be specified if the signature (name and data type) of the foreign key column(s) and the referenced table’s primary key column(s) exactly match
I am also able to successfully run these statements in Snowflake.
**Fully reproducible code snippet**
```python
from sqlglot import parse_one
sql = '''
CREATE OR REPLACE TABLE TEST (
ID NUMBER NOT NULL IDENTITY PRIMARY KEY,
SOME_REF NUMBER NOT NULL FOREIGN KEY REFERENCES SOME_OTHER_TABLE (ID)
);
'''
parsed = parse_one(sql, read='snowflake')
```
However, this does work:
```python
from sqlglot import parse_one
sql = '''
CREATE OR REPLACE TABLE TEST (
ID NUMBER NOT NULL IDENTITY PRIMARY KEY,
SOME_REF NUMBER NOT NULL FOREIGN KEY (SOME_NAME) REFERENCES SOME_OTHER_TABLE (ID)
);
'''
parsed = parse_one(sql, read='snowflake')
```
**Official Documentation**
https://docs.snowflake.com/en/sql-reference/sql/create-table-constraint#:~:text=do%20not%20need%20to%20be%20specified%20if%20the%20signature
| tobymao/sqlglot | diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 4eb97235..a11c21a4 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -976,6 +976,12 @@ WHERE
"snowflake": "EDITDISTANCE(col1, col2, 3)",
},
)
+ self.validate_identity("SELECT BITOR(a, b) FROM table")
+
+ self.validate_identity("SELECT BIT_OR(a, b) FROM table", "SELECT BITOR(a, b) FROM table")
+
+ # Test BITOR with three arguments, padding on the left
+ self.validate_identity("SELECT BITOR(a, b, 'LEFT') FROM table_name")
def test_null_treatment(self):
self.validate_all(
@@ -1450,6 +1456,9 @@ WHERE
self.validate_identity(
"""CREATE OR REPLACE FUNCTION ibis_udfs.public.object_values("obj" OBJECT) RETURNS ARRAY LANGUAGE JAVASCRIPT STRICT AS ' return Object.values(obj) '"""
)
+ self.validate_identity(
+ "CREATE OR REPLACE TABLE TEST (SOME_REF DECIMAL(38, 0) NOT NULL FOREIGN KEY REFERENCES SOME_OTHER_TABLE (ID))"
+ )
self.validate_identity(
"CREATE OR REPLACE FUNCTION my_udf(location OBJECT(city VARCHAR, zipcode DECIMAL(38, 0), val ARRAY(BOOLEAN))) RETURNS VARCHAR AS $$ SELECT 'foo' $$",
"CREATE OR REPLACE FUNCTION my_udf(location OBJECT(city VARCHAR, zipcode DECIMAL(38, 0), val ARRAY(BOOLEAN))) RETURNS VARCHAR AS ' SELECT \\'foo\\' '",
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 25.34 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@2655d7c11d677cf47f33ac62fbfb86f4117ffd75#egg=sqlglot
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pre-commit==4.2.0
- pygments==2.19.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake"
] | [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_alter_set_unset",
"tests/dialects/test_snowflake.py::TestSnowflake::test_copy",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten",
"tests/dialects/test_snowflake.py::TestSnowflake::test_from_changes",
"tests/dialects/test_snowflake.py::TestSnowflake::test_grant",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_querying_semi_structured_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_users",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_views",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_window_function_arg"
] | [] | MIT License | 20,450 | 974 | [
"sqlglot/dialects/snowflake.py",
"sqlglot/expressions.py",
"sqlglot/generator.py"
] |
|
nilearn__nilearn-4929 | cd17602ed281f5ebfb8b29f7178539344682acfd | 2024-12-10 14:40:33 | d979bacad0dfd0600e35a23f9f08d3df7907e489 | hndgzkn: > 'prob' and 'det' which are not at all good. We should deprecate them with "probabilistic" & "deterministic"
I haven't changed the possible values for `version`/`atlas_type`, shall I?
Remi-Gau: > I haven't changed the possible values for `version`/`atlas_type`, shall I?
yes
can be in this PR or another one if you prefer
hndgzkn: > can be in this PR or another one if you prefer
Is there something special for deprecation, or just changing the values is ok?
Remi-Gau: > Is there something special for deprecation, or just changing the values is ok?
we probably need to map old values to new values and throw a deprecation warning if old values are passed
github-actions[bot]: 👋 @hndgzkn Thanks for creating a PR!
Until this PR is ready for review, you can include the [WIP] tag in its title, or leave it as a github draft.
Please make sure it is compliant with our [contributing guidelines](https://nilearn.github.io/stable/development.html#contribution-guidelines). In particular, be sure it checks the boxes listed below.
- [ ] PR has an interpretable title.
- [ ] PR links to Github issue with mention `Closes #XXXX` (see our documentation on [PR structure](https://nilearn.github.io/stable/development.html#pr-structure))
- [ ] Code is PEP8-compliant (see our documentation on [coding style](https://nilearn.github.io/stable/development.html#coding-style))
- [ ] Changelog or what's new entry in `doc/changes/latest.rst` (see our documentation on [PR structure](https://nilearn.github.io/stable/development.html#pr-structure))
For new features:
- [ ] There is at least one unit test per new function / class (see our documentation on [testing](https://nilearn.github.io/stable/development.html#tests))
- [ ] The new feature is demoed in at least one relevant example.
For bug fixes:
- [ ] There is at least one test that would fail under the original bug conditions.
We will review it as quick as possible, feel free to ping us with questions if needed.
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/nilearn/nilearn/pull/4929?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 93.28%. Comparing base [(`faad5c7`)](https://app.codecov.io/gh/nilearn/nilearn/commit/faad5c7df045cf616bec55795db6dcd9a85f140a?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) to head [(`195359b`)](https://app.codecov.io/gh/nilearn/nilearn/commit/195359b05bc9e41c75bcf4ea40df7c3ca3dc4e16?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
> Report is 9 commits behind head on main.
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #4929 +/- ##
==========================================
+ Coverage 92.39% 93.28% +0.89%
==========================================
Files 137 136 -1
Lines 17153 17143 -10
Branches 2950 2945 -5
==========================================
+ Hits 15848 15992 +144
+ Misses 782 623 -159
- Partials 523 528 +5
```
| [Flag](https://app.codecov.io/gh/nilearn/nilearn/pull/4929/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | Coverage Δ | |
|---|---|---|
| [ubuntu-latest_3.9_test_min](https://app.codecov.io/gh/nilearn/nilearn/pull/4929/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.9_test_plot_min](https://app.codecov.io/gh/nilearn/nilearn/pull/4929/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.13_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4929/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `93.28% <100.00%> (?)` | |
Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn#carryforward-flags-in-the-pull-request-comment) to find out more.
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/nilearn/nilearn/pull/4929?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
Remi-Gau: @hndgzkn
quick FYI: as this PR implies a behavior change it won't be in the next 'patch release' (next week 🤞🏽) that should contain only bug fixes, refactor, doc changes. | diff --git a/nilearn/datasets/atlas.py b/nilearn/datasets/atlas.py
index cac8de1c0..bce0bfb04 100644
--- a/nilearn/datasets/atlas.py
+++ b/nilearn/datasets/atlas.py
@@ -13,14 +13,14 @@ import pandas as pd
from nibabel import freesurfer, load
from sklearn.utils import Bunch
-from .._utils import check_niimg, fill_doc, logger
-from ..image import get_data, new_img_like, reorder_img
-from ._utils import (
+from nilearn._utils import check_niimg, fill_doc, logger, rename_parameters
+from nilearn.datasets._utils import (
PACKAGE_DIRECTORY,
fetch_files,
get_dataset_descr,
get_dataset_dir,
)
+from nilearn.image import get_data, new_img_like, reorder_img
_TALAIRACH_LEVELS = ["hemisphere", "lobe", "gyrus", "tissue", "ba"]
@@ -1961,8 +1961,13 @@ def fetch_atlas_talairach(level_name, data_dir=None, verbose=1):
return Bunch(maps=atlas_img, labels=labels, description=description)
+@rename_parameters(
+ replacement_params={"version": "atlas_type"}, end_version="0.13.1"
+)
@fill_doc
-def fetch_atlas_pauli_2017(version="prob", data_dir=None, verbose=1):
+def fetch_atlas_pauli_2017(
+ atlas_type="probabilistic", data_dir=None, verbose=1
+):
"""Download the Pauli et al. (2017) atlas.
This atlas has 12 subcortical nodes in total. See
@@ -1970,10 +1975,10 @@ def fetch_atlas_pauli_2017(version="prob", data_dir=None, verbose=1):
Parameters
----------
- version : {'prob', 'det'}, default='prob'
- Which version of the atlas should be download. This can be
- 'prob' for the :term:`Probabilistic atlas`, or 'det' for the
- :term:`Deterministic atlas`.
+ atlas_type : {'probabilistic', 'deterministic'}, default='probabilistic'
+ Which type of the atlas should be download. This can be
+ 'probabilistic' for the :term:`Probabilistic atlas`, or 'deterministic'
+ for the :term:`Deterministic atlas`.
%(data_dir)s
%(verbose)s
@@ -1983,17 +1988,18 @@ def fetch_atlas_pauli_2017(version="prob", data_dir=None, verbose=1):
Dictionary-like object, contains:
- 'maps': :obj:`str`, path to nifti file containing the
- :class:`~nibabel.nifti1.Nifti1Image`. If ``version='prob'``,
- the image shape is ``(193, 229, 193, 16)``. If ``version='det'``
- the image shape is ``(198, 263, 212)``, and values are indices
- in the list of labels (integers from 0 to 16).
+ :class:`~nibabel.nifti1.Nifti1Image`. If
+ ``atlas_type='probabilistic'``, the image shape is ``(193, 229,
+ 193, 16)``. If ``atlas_type='deterministic'`` the image shape is
+ ``(198, 263, 212)``, and values are indices in the list of labels
+ (integers from 0 to 16).
- 'labels': :obj:`list` of :obj:`str`. List of region names. The
list contains 16 values for both
:term:`probabilitic<Probabilistic atlas>` and
- :term:`deterministic<Deterministic atlas>` versions.
+ :term:`deterministic<Deterministic atlas>` types.
.. warning::
- For the :term:`deterministic<Deterministic atlas>` version,
+ For the :term:`deterministic<Deterministic atlas>` type,
'Background' is not included in the list of labels.
To have proper indexing, you should either manually add
'Background' to the list of labels:
@@ -2015,20 +2021,43 @@ def fetch_atlas_pauli_2017(version="prob", data_dir=None, verbose=1):
- 'description': :obj:`str`, short description of the atlas and
some references.
+ Warns
+ -----
+ DeprecationWarning
+ The possible values for atlas_type are currently 'prob' and 'det'. From
+ release 0.13.0 onwards, atlas_type will accept only 'probabilistic' or
+ 'deterministic' as value.
+
References
----------
.. footbibliography::
"""
- if version == "prob":
+ # TODO: remove this part after release 0.13.0
+ if atlas_type in ("prob", "det"):
+ atlas_type_values = (
+ "The possible values for atlas_type are currently 'prob' and"
+ " 'det'. From release 0.13.0 onwards, atlas_type will accept only"
+ " 'probabilistic' or 'deterministic' as value."
+ )
+ warnings.warn(
+ category=DeprecationWarning,
+ message=atlas_type_values,
+ stacklevel=2,
+ )
+ atlas_type = (
+ "probabilistic" if atlas_type == "prob" else "deterministic"
+ )
+
+ if atlas_type == "probabilistic":
url_maps = "https://osf.io/w8zq2/download"
filename = "pauli_2017_prob.nii.gz"
- elif version == "det":
+ elif atlas_type == "deterministic":
url_maps = "https://osf.io/5mqfx/download"
filename = "pauli_2017_det.nii.gz"
else:
raise NotImplementedError(
- f"{version} is no valid version for the Pauli atlas"
+ f"{atlas_type} is not a valid type for the Pauli atlas"
)
url_labels = "https://osf.io/6qrcb/download"
| Improper parameter name & non-optimal args for atlas Pauli 2017
The `version` parameter for :func:`nilearn.datasets.fetch_atlas_pauli_2017` is a weird name. Version makes me think of numbers and iterations, and chronological versions of atlases, not the type of atlas. We should change it to `type`.
Additionally, currently, the valid arguments for this are 'prob' and 'det' which are not at all good. We should deprecate them with "probabilistic" & "deterministic".
I will open a PR for this.
| nilearn/nilearn | diff --git a/nilearn/datasets/tests/test_atlas.py b/nilearn/datasets/tests/test_atlas.py
index 2d037a560..b941f5df8 100644
--- a/nilearn/datasets/tests/test_atlas.py
+++ b/nilearn/datasets/tests/test_atlas.py
@@ -741,7 +741,7 @@ def test_fetch_atlas_pauli_2017(tmp_path, request_mocker):
request_mocker.url_mapping["*osf.io/w8zq2/*"] = prob_atlas
data_dir = str(tmp_path / "pauli_2017")
- data = atlas.fetch_atlas_pauli_2017("det", data_dir)
+ data = atlas.fetch_atlas_pauli_2017("deterministic", data_dir)
assert isinstance(data, Bunch)
@@ -753,7 +753,7 @@ def test_fetch_atlas_pauli_2017(tmp_path, request_mocker):
assert len(np.unique(values)) == 17
- data = atlas.fetch_atlas_pauli_2017("prob", data_dir)
+ data = atlas.fetch_atlas_pauli_2017("probabilistic", data_dir)
assert load(data.maps).shape[-1] == 16
@@ -763,6 +763,52 @@ def test_fetch_atlas_pauli_2017(tmp_path, request_mocker):
atlas.fetch_atlas_pauli_2017("junk for testing", data_dir)
+# TODO: remove this test after release 0.13.0
+def test_fetch_atlas_pauli_2017_deprecated_values(tmp_path, request_mocker):
+ """Tests nilearn.datasets.atlas.fetch_atlas_pauli_2017 to receive
+ DepricationWarning upon use of deprecated version parameter and its
+ possible values "prob" and "det".
+ """
+ labels = pd.DataFrame({"label": [f"label_{i}" for i in range(16)]}).to_csv(
+ sep="\t", header=False
+ )
+ det_atlas = data_gen.generate_labeled_regions((7, 6, 5), 16)
+ prob_atlas, _ = data_gen.generate_maps((7, 6, 5), 16)
+ request_mocker.url_mapping["*osf.io/6qrcb/*"] = labels
+ request_mocker.url_mapping["*osf.io/5mqfx/*"] = det_atlas
+ request_mocker.url_mapping["*osf.io/w8zq2/*"] = prob_atlas
+ data_dir = str(tmp_path / "pauli_2017")
+
+ with pytest.warns(DeprecationWarning, match='The parameter "version"'):
+ data = atlas.fetch_atlas_pauli_2017(
+ version="probabilistic", data_dir=data_dir
+ )
+
+ assert load(data.maps).shape[-1] == 16
+
+ assert data.description != ""
+
+ with pytest.warns(
+ DeprecationWarning, match="The possible values for atlas_type"
+ ):
+ data = atlas.fetch_atlas_pauli_2017("det", data_dir)
+
+ assert isinstance(data, Bunch)
+
+ assert data.description != ""
+
+ assert len(data.labels) == 16
+
+ with pytest.warns(
+ DeprecationWarning, match="The possible values for atlas_type"
+ ):
+ data = atlas.fetch_atlas_pauli_2017("prob", data_dir)
+
+ assert load(data.maps).shape[-1] == 16
+
+ assert data.description != ""
+
+
def _schaefer_labels(match, requests): # noqa: ARG001
# fails if requests is not passed
info = match.groupdict()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-randomly",
"pytest-reporter-html1",
"pytest-xdist"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
ansi2html==1.9.2
babel==2.17.0
beautifulsoup4==4.13.3
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
distlib==0.3.9
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
fonttools==4.56.0
furo==2024.8.6
htmlmin2==0.1.13
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
joblib==1.4.2
kaleido==0.2.1
kiwisolver==1.4.7
latexcodec==3.0.0
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdit-py-plugins==0.4.2
mdurl==0.1.2
memory-profiler==0.61.0
myst-parser==3.0.1
narwhals==1.32.0
nibabel==5.3.2
-e git+https://github.com/nilearn/nilearn.git@cd17602ed281f5ebfb8b29f7178539344682acfd#egg=nilearn
numpy==2.0.2
numpydoc==1.8.0
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy @ file:///croot/pluggy_1733169602837/work
psutil==7.0.0
pybtex==0.24.0
pybtex-docutils==1.0.3
Pygments==2.19.1
pyparsing==3.2.3
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-randomly==3.16.0
pytest-reporter==0.5.3
pytest-reporter-html1==0.9.2
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-basic-ng==1.0.0b2
sphinx-copybutton==0.5.2
sphinx-gallery==0.19.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-bibtex==2.6.3
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sphinxext-opengraph==0.9.1
tabulate==0.9.0
threadpoolctl==3.6.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: nilearn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- ansi2html==1.9.2
- babel==2.17.0
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- distlib==0.3.9
- docutils==0.21.2
- execnet==2.1.1
- filelock==3.18.0
- fonttools==4.56.0
- furo==2024.8.6
- htmlmin2==0.1.13
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- jinja2==3.1.6
- joblib==1.4.2
- kaleido==0.2.1
- kiwisolver==1.4.7
- latexcodec==3.0.0
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- memory-profiler==0.61.0
- myst-parser==3.0.1
- narwhals==1.32.0
- nibabel==5.3.2
- nilearn==0.11.2.dev3+gcd17602ed
- numpy==2.0.2
- numpydoc==1.8.0
- pandas==2.2.3
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- psutil==7.0.0
- pybtex==0.24.0
- pybtex-docutils==1.0.3
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-api==1.9.0
- pytest-cov==6.0.0
- pytest-randomly==3.16.0
- pytest-reporter==0.5.3
- pytest-reporter-html1==0.9.2
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-basic-ng==1.0.0b2
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-gallery==0.19.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-bibtex==2.6.3
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sphinxext-opengraph==0.9.1
- tabulate==0.9.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/nilearn
| [
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_pauli_2017_deprecated_values",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_pauli_2017"
] | [] | [
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale036]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_source",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_talairach",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_error",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale325]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_aal[SPM8-zip-uploads]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale444]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_coords_dosenbach_2010",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_destrieux_2009[True]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_allen_2011",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl_errors[Juelich-prob-1mm]",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_coords_power_2011]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl_errors[HarvardOxford-cortl-prob-1mm]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale122]",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_atlas_smith_2009]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_difumo",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[HarvardOxford--Cortical-cort-prob-1mm-False-False]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_smith_2009",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[HarvardOxford--Subcortical-sub-maxprob-thr0-1mm-False-True]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale020]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_surf_destrieux",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_atlas_yeo_2011]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale197]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_aal[SPM5-zip-uploads]",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_atlas_allen_2011]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[HarvardOxford--Cortical-Lateralized-cortl-maxprob-thr0-1mm-True-True]",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_coords_seitzman_2018]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_aal_version_error",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_destrieux_2009[False]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_msdl",
"nilearn/datasets/tests/test_atlas.py::test_aal_version_deprecation",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_coords_dosenbach_2010]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale012]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[Juelich--maxprob-thr0-1mm-False-True]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_schaefer_2018",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_atlas_schaefer_2018]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_coords_power_2011",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale007]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_coords_seitzman_2018",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_basc_multiscale_2015_old_code[scale064]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[Juelich--prob-1mm-False-False]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_fsl[Juelich--maxprob-thr0-1mm-False-False]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_yeo_2011",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_aal[SPM12-gztar-AAL_files]",
"nilearn/datasets/tests/test_atlas.py::test_atlas_fetcher_return_bunch[fetch_atlas_basc_multiscale_2015]",
"nilearn/datasets/tests/test_atlas.py::test_fetch_atlas_craddock_2012",
"nilearn/datasets/tests/test_atlas.py::test_downloader"
] | [] | New BSD License | 20,453 | 1,515 | [
"nilearn/datasets/atlas.py"
] |
Kinto__kinto-3477 | 10cca5fa71dee834068f9405cf6f7df28c14f7d4 | 2024-12-10 18:02:02 | 10cca5fa71dee834068f9405cf6f7df28c14f7d4 | diff --git a/kinto/core/initialization.py b/kinto/core/initialization.py
index 397dc7ce..0b8fa7ad 100644
--- a/kinto/core/initialization.py
+++ b/kinto/core/initialization.py
@@ -1,6 +1,7 @@
import logging
import random
import re
+import urllib.parse
import warnings
from datetime import datetime
from secrets import token_hex
@@ -474,6 +475,7 @@ def setup_metrics(config):
try:
endpoint = utils.strip_uri_prefix(request.path)
+ endpoint = urllib.parse.quote_plus(endpoint, safe="/?&=-_")
except UnicodeDecodeError as e:
# This `on_new_response` callback is also called when a HTTP 400
# is returned because of an invalid UTF-8 path. We still want metrics.
@@ -507,7 +509,7 @@ def setup_metrics(config):
unique=[
("method", request.method.lower()),
("endpoint", endpoint),
- ("status", str(request.response.status_code)),
+ ("status", str(event.response.status_code)),
]
+ metrics_matchdict_labels,
)
@@ -527,7 +529,7 @@ def setup_metrics(config):
# Observe response size.
metrics_service.observe(
"request_size",
- len(request.response.body or b""),
+ len(event.response.body or b""),
labels=[("endpoint", endpoint)] + metrics_matchdict_labels,
)
diff --git a/kinto/core/resource/__init__.py b/kinto/core/resource/__init__.py
index f8aaa7b1..da447642 100644
--- a/kinto/core/resource/__init__.py
+++ b/kinto/core/resource/__init__.py
@@ -665,7 +665,7 @@ class Resource:
obj = self._get_object_or_404(self.object_id)
self._raise_412_if_modified(obj)
- # Retreive the last_modified information from a querystring if present.
+ # Retrieve the last_modified information from a querystring if present.
last_modified = self.request.validated["querystring"].get("last_modified")
# If less or equal than current object. Ignore it.
@@ -1060,7 +1060,8 @@ class Resource:
"""Extracts filters from QueryString parameters."""
def is_valid_timestamp(value):
- return isinstance(value, int) or re.match(r'^"?\d+"?$', str(value))
+ # Is either integer, or integer as string, or integer between 2 quotes.
+ return isinstance(value, int) or re.match(r'^(\d+)$|^("\d+")$', str(value))
queryparams = self.request.validated["querystring"]
diff --git a/kinto/plugins/statsd.py b/kinto/plugins/statsd.py
index b6a8830f..5ae89401 100644
--- a/kinto/plugins/statsd.py
+++ b/kinto/plugins/statsd.py
@@ -13,6 +13,14 @@ except ImportError: # pragma: no cover
statsd_module = None
+def sanitize(value):
+ """
+ Telegraf does not support ':' in values.
+ See https://github.com/influxdata/telegraf/issues/4495
+ """
+ return value.replace(":", "") if isinstance(value, str) else value
+
+
@implementer(metrics.IMetricsService)
class StatsDService:
def __init__(self, host, port, prefix):
@@ -22,7 +30,7 @@ class StatsDService:
return self._client.timer(key)
def observe(self, key, value, labels=[]):
- return self._client.gauge(key, value)
+ return self._client.gauge(key, sanitize(value))
def count(self, key, count=1, unique=None):
if unique is None:
@@ -30,7 +38,7 @@ class StatsDService:
if isinstance(unique, list):
# [("method", "get")] -> "method.get"
# [("endpoint", "/"), ("method", "get")] -> "endpoint./.method.get"
- unique = ".".join(f"{label[0]}.{label[1]}" for label in unique)
+ unique = ".".join(f"{label[0]}.{sanitize(label[1])}" for label in unique)
else:
warnings.warn(
"`unique` parameter should be of type ``list[tuple[str, str]]``",
| Crash with `psycopg2.errors.InvalidTextRepresentation` (bigint) with last_modified = `1733242309482"`
```
(psycopg2.errors.InvalidTextRepresentation) invalid input syntax for type bigint: "1733242309482""
LINE 7: AND as_epoch(last_modified) > '1733242309482"'
^
[SQL:
SELECT id, as_epoch(last_modified) AS last_modified, data
FROM objects
WHERE parent_id = %(parent_id)s
AND resource_name = %(resource_name)s
AND as_epoch(last_modified) > %(filters_value_0)s
ORDER BY last_modified DESC
LIMIT %(pagination_limit)s;
]
[parameters: {'parent_id': '/buckets/main/collections/quicksuggest', 'resource_name': 'record', 'filters_value_0': '1733242309482"', 'pagination_limit': 10001}]
(Background on this error at: https://sqlalche.me/e/20/9h9h
)
``` | Kinto/kinto | diff --git a/tests/core/resource/test_filter.py b/tests/core/resource/test_filter.py
index d34eb8f0..1ccd14ac 100644
--- a/tests/core/resource/test_filter.py
+++ b/tests/core/resource/test_filter.py
@@ -92,6 +92,10 @@ class FilteringTest(BaseTest):
self.validated["querystring"] = {"lt_last_modified": bad_value}
self.assertRaises(httpexceptions.HTTPBadRequest, self.resource.plural_get)
+ def test_filter_raises_error_if_last_modified_value_has_malformed_quotes(self):
+ self.validated["querystring"] = {"last_modified": '123"'}
+ self.assertRaises(httpexceptions.HTTPBadRequest, self.resource.plural_get)
+
def test_filter_works_with_since_none(self):
self.validated["querystring"] = {"_since": None}
result = self.resource.plural_get()
diff --git a/tests/core/test_initialization.py b/tests/core/test_initialization.py
index bac6dd42..fd57620b 100644
--- a/tests/core/test_initialization.py
+++ b/tests/core/test_initialization.py
@@ -420,6 +420,25 @@ class MetricsConfigurationTest(unittest.TestCase):
unique=[("method", "get"), ("endpoint", "/__heartbeat__"), ("status", "200")],
)
+ def test_statsd_sanitizes_url_in_metrics(self):
+ kinto.core.initialize(self.config, "0.0.1", "settings_prefix")
+ app = webtest.TestApp(self.config.make_wsgi_app())
+ app.get(
+ "/v0/changeset'%7C%22%3F%3E%3C!DOCTYPE%22http://xh3E'),'/l')%20from%20dual)%7C'",
+ status=404,
+ )
+ self.mocked().count.assert_any_call(
+ "request_summary",
+ unique=[
+ ("method", "get"),
+ (
+ "endpoint",
+ "/changeset%27%257C%2522%253F%253E%253C%21DOCTYPE%2522http%3A//xh3E%27%29%2C%27/l%27%29%2520from%2520dual%29%257C%27",
+ ),
+ ("status", "404"),
+ ],
+ )
+
def test_statsd_observe_request_size(self):
kinto.core.initialize(self.config, "0.0.1", "settings_prefix")
app = webtest.TestApp(self.config.make_wsgi_app())
diff --git a/tests/plugins/test_statsd.py b/tests/plugins/test_statsd.py
index e40af66b..d801eea8 100644
--- a/tests/plugins/test_statsd.py
+++ b/tests/plugins/test_statsd.py
@@ -61,6 +61,11 @@ class StatsdClientTest(unittest.TestCase):
self.client.count("click", unique=[("component", "menu"), ("sound", "off")])
mocked_client.set.assert_called_with("click", "component.menu.sound.off")
+ def test_values_are_sanitized(self):
+ with mock.patch.object(self.client, "_client") as mocked_client:
+ self.client.count("click", unique=[("user", "account:boss")])
+ mocked_client.set.assert_called_with("click", "user.accountboss")
+
@mock.patch("kinto.plugins.statsd.statsd_module")
def test_load_from_config(self, module_mock):
config = testing.setUp()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 19.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev,test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==1.3.0
attrs==25.3.0
backports.tarfile==1.2.0
bcrypt==4.3.0
beautifulsoup4==4.13.3
bravado==11.1.0
bravado-core==6.1.1
build==1.2.2.post1
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
colander==2.0
colorama==0.4.6
cornice==6.1.0
cornice-swagger==1.0.1
coverage==7.8.0
cryptography==44.0.2
dockerflow==2024.4.2
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
fqdn==1.5.1
greenlet==3.1.1
hupper==1.12.1
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
iso8601==2.1.0
isoduration==20.11.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
jsonpatch==1.33
jsonpointer==3.0.0
jsonref==1.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
keyring==25.6.0
-e git+https://github.com/Kinto/kinto.git@10cca5fa71dee834068f9405cf6f7df28c14f7d4#egg=kinto
logging-color-formatter==1.1.0
markdown-it-py==3.0.0
mdurl==0.1.2
monotonic==1.6
more-itertools==10.6.0
msgpack==1.1.0
nh3==0.2.21
packaging==24.2
PasteDeploy==3.1.0
plaster==1.1.2
plaster-pastedeploy==1.0.1
playwright==1.51.0
pluggy==1.5.0
pycparser==2.22
pyee==12.1.1
Pygments==2.19.1
pyproject_hooks==1.2.0
pyramid==2.0.2
pyramid-mailer==0.15.1
pyramid_multiauth==1.0.2
pyramid_tm==2.6
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cache==1.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-rapidjson==1.20
pytz==2025.2
PyYAML==6.0.2
readme_renderer==44.0
referencing==0.36.2
repoze.sendmail==4.4.1
requests==2.32.3
requests-toolbelt==1.0.0
rfc3339-validator==0.1.4
rfc3986==2.0.0
rfc3986-validator==0.1.1
rich==14.0.0
rpds-py==0.24.0
ruff==0.11.2
SecretStorage==3.3.3
simplejson==3.20.1
six==1.17.0
soupsieve==2.6
swagger-spec-validator==3.0.4
tomli==2.2.1
transaction==5.0
translationstring==1.4
twine==6.1.0
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
uri-template==1.3.0
urllib3==2.3.0
venusian==3.1.1
waitress==3.0.2
webcolors==24.11.1
WebOb==1.8.9
WebTest==3.0.4
zipp==3.21.0
zope.deprecation==5.1
zope.interface==7.2
| name: kinto
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- arrow==1.3.0
- attrs==25.3.0
- backports-tarfile==1.2.0
- bcrypt==4.3.0
- beautifulsoup4==4.13.3
- bravado==11.1.0
- bravado-core==6.1.1
- build==1.2.2.post1
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- colander==2.0
- colorama==0.4.6
- cornice==6.1.0
- cornice-swagger==1.0.1
- coverage==7.8.0
- cryptography==44.0.2
- dockerflow==2024.4.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- fqdn==1.5.1
- greenlet==3.1.1
- hupper==1.12.1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- iso8601==2.1.0
- isoduration==20.11.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jsonpatch==1.33
- jsonpointer==3.0.0
- jsonref==1.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- keyring==25.6.0
- kinto==19.3.1.dev5+g10cca5fa
- logging-color-formatter==1.1.0
- markdown-it-py==3.0.0
- mdurl==0.1.2
- monotonic==1.6
- more-itertools==10.6.0
- msgpack==1.1.0
- nh3==0.2.21
- packaging==24.2
- pastedeploy==3.1.0
- plaster==1.1.2
- plaster-pastedeploy==1.0.1
- playwright==1.51.0
- pluggy==1.5.0
- pycparser==2.22
- pyee==12.1.1
- pygments==2.19.1
- pyproject-hooks==1.2.0
- pyramid==2.0.2
- pyramid-mailer==0.15.1
- pyramid-multiauth==1.0.2
- pyramid-tm==2.6
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cache==1.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-rapidjson==1.20
- pytz==2025.2
- pyyaml==6.0.2
- readme-renderer==44.0
- referencing==0.36.2
- repoze-sendmail==4.4.1
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3339-validator==0.1.4
- rfc3986==2.0.0
- rfc3986-validator==0.1.1
- rich==14.0.0
- rpds-py==0.24.0
- ruff==0.11.2
- secretstorage==3.3.3
- simplejson==3.20.1
- six==1.17.0
- soupsieve==2.6
- swagger-spec-validator==3.0.4
- tomli==2.2.1
- transaction==5.0
- translationstring==1.4
- twine==6.1.0
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- uri-template==1.3.0
- urllib3==2.3.0
- venusian==3.1.1
- waitress==3.0.2
- webcolors==24.11.1
- webob==1.8.9
- webtest==3.0.4
- zipp==3.21.0
- zope-deprecation==5.1
- zope-interface==7.2
prefix: /opt/conda/envs/kinto
| [
"tests/core/resource/test_filter.py::FilteringTest::test_filter_raises_error_if_last_modified_value_has_malformed_quotes"
] | [] | [
"tests/core/resource/test_filter.py::FilteringTest::test_contains_any_can_filter_with_a_list_of_integers",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_any_can_filter_with_a_list_of_strings",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_any_fails_on_a_non_sequence_object_value",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_can_filter_with_an_integer",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_can_filter_with_list_of_strings",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_can_filter_with_one_object",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_can_filter_with_one_string",
"tests/core/resource/test_filter.py::FilteringTest::test_contains_fails_on_a_non_sequence_object_value",
"tests/core/resource/test_filter.py::FilteringTest::test_different_value",
"tests/core/resource/test_filter.py::FilteringTest::test_double_basic_filter_by_attribute",
"tests/core/resource/test_filter.py::FilteringTest::test_exclude_returns_400_if_value_has_wrong_type",
"tests/core/resource/test_filter.py::FilteringTest::test_exclude_values",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_considers_string_if_syntaxically_invalid",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_does_not_fail_with_complex_type_syntax",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_errors_are_json_formatted",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_on_id_is_supported",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_on_unknown_attribute_raises_error",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_raises_error_if_last_modified_value_is_empty",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_raises_error_if_last_modified_value_is_not_int",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_raises_error_if_since_or_before_value_is_neither_int_nor_string",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_works_with_empty_list",
"tests/core/resource/test_filter.py::FilteringTest::test_filter_works_with_since_none",
"tests/core/resource/test_filter.py::FilteringTest::test_gt_value",
"tests/core/resource/test_filter.py::FilteringTest::test_has_values",
"tests/core/resource/test_filter.py::FilteringTest::test_has_values_false",
"tests/core/resource/test_filter.py::FilteringTest::test_in_values",
"tests/core/resource/test_filter.py::FilteringTest::test_include_returns_400_if_value_has_wrong_type",
"tests/core/resource/test_filter.py::FilteringTest::test_list_can_be_filtered_on_deleted_with_since",
"tests/core/resource/test_filter.py::FilteringTest::test_list_cannot_be_filtered_on_deleted_without_since",
"tests/core/resource/test_filter.py::FilteringTest::test_lt_value",
"tests/core/resource/test_filter.py::FilteringTest::test_maximal_value",
"tests/core/resource/test_filter.py::FilteringTest::test_minimal_value",
"tests/core/resource/test_filter.py::FilteringTest::test_not_string_filter",
"tests/core/resource/test_filter.py::FilteringTest::test_number_of_objects_matches_filter",
"tests/core/resource/test_filter.py::FilteringTest::test_regexp_is_strict_for_min_and_max",
"tests/core/resource/test_filter.py::FilteringTest::test_single_basic_filter_by_attribute",
"tests/core/resource/test_filter.py::FilteringTest::test_string_filters_naively_by_value",
"tests/core/resource/test_filter.py::FilteringTest::test_string_filters_searching_by_value_matching_many",
"tests/core/resource/test_filter.py::FilteringTest::test_string_filters_searching_by_value_matching_one",
"tests/core/resource/test_filter.py::FilteringTest::test_string_filters_searching_by_value_matching_vary_case",
"tests/core/resource/test_filter.py::FilteringTest::test_string_filters_searching_by_value_not_matching",
"tests/core/resource/test_filter.py::SubobjectFilteringTest::test_objects_can_be_filtered_by_subobjects",
"tests/core/resource/test_filter.py::SubobjectFilteringTest::test_subobjects_filters_are_ignored_if_not_object",
"tests/core/resource/test_filter.py::SubobjectFilteringTest::test_subobjects_filters_works_with_directives",
"tests/core/resource/test_filter.py::JSONFilteringTest::test_filter_by_empty_array",
"tests/core/resource/test_filter.py::JSONFilteringTest::test_filter_by_empty_object",
"tests/core/resource/test_filter.py::JSONFilteringTest::test_filter_by_nonempty_array",
"tests/core/resource/test_filter.py::JSONFilteringTest::test_filter_by_nonempty_object",
"tests/core/resource/test_filter.py::JSONFilteringTest::test_filter_by_null",
"tests/core/test_initialization.py::InitializationTest::test_backends_are_not_instantiated_by_default",
"tests/core/test_initialization.py::InitializationTest::test_backends_type_is_checked_when_instantiated",
"tests/core/test_initialization.py::InitializationTest::test_default_settings_are_overriden_by_application",
"tests/core/test_initialization.py::InitializationTest::test_default_settings_are_overriden_if_specified_in_initialize",
"tests/core/test_initialization.py::InitializationTest::test_environment_values_override_configuration",
"tests/core/test_initialization.py::InitializationTest::test_fails_if_no_version_is_specified",
"tests/core/test_initialization.py::InitializationTest::test_http_api_version_relies_on_project_version_by_default",
"tests/core/test_initialization.py::InitializationTest::test_http_api_version_uses_setting_if_specified",
"tests/core/test_initialization.py::InitializationTest::test_project_version_uses_setting_if_specified",
"tests/core/test_initialization.py::InitializationTest::test_set_the_project_version_if_specified",
"tests/core/test_initialization.py::InitializationTest::test_set_the_settings_prefix_from_settings_even_if_specified",
"tests/core/test_initialization.py::InitializationTest::test_set_the_settings_prefix_if_specified",
"tests/core/test_initialization.py::InitializationTest::test_specified_default_settings_are_overriden_by_application",
"tests/core/test_initialization.py::InitializationTest::test_uses_the_version_for_prefix",
"tests/core/test_initialization.py::InitializationTest::test_warns_if_not_https",
"tests/core/test_initialization.py::InitializationTest::test_warns_if_settings_prefix_is_empty",
"tests/core/test_initialization.py::InitializationTest::test_warns_if_settings_prefix_is_missing",
"tests/core/test_initialization.py::ProjectSettingsTest::test_does_raise_valueerror_if_entries_are_not_hashable",
"tests/core/test_initialization.py::ProjectSettingsTest::test_does_raise_valueerror_if_multiple_entries_are_equal",
"tests/core/test_initialization.py::ProjectSettingsTest::test_environment_can_specify_settings_prefix",
"tests/core/test_initialization.py::ProjectSettingsTest::test_raises_valueerror_if_different_multiple_entries",
"tests/core/test_initialization.py::ProjectSettingsTest::test_uses_settings_prefix",
"tests/core/test_initialization.py::ProjectSettingsTest::test_uses_unprefixed_name",
"tests/core/test_initialization.py::ApplicationWrapperTest::test_load_default_settings_converts_to_native_correctly",
"tests/core/test_initialization.py::ApplicationWrapperTest::test_load_default_settings_handle_prefix_attributes",
"tests/core/test_initialization.py::ApplicationWrapperTest::test_profiler_is_installed_if_set_to_true",
"tests/core/test_initialization.py::ApplicationWrapperTest::test_profiler_is_not_installed_if_set_to_false",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_by_default_relies_on_incoming_headers",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_by_default_relies_on_pyramid_application_url",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_by_default_relies_on_wsgi_environment",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_http_host_overrides_the_request_headers",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_http_host_overrides_the_wsgi_environment",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_http_scheme_overrides_the_wsgi_environment",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_requests_have_a_bound_data_attribute",
"tests/core/test_initialization.py::RequestsConfigurationTest::test_subrequests_share_parent_bound_data",
"tests/core/test_initialization.py::PluginsTest::test_kinto_core_includes_are_included_manually",
"tests/core/test_initialization.py::PluginsTest::test_plugin_benefits_from_cors_setup",
"tests/core/test_initialization.py::PluginsTest::test_plugin_can_define_protected_views",
"tests/core/test_initialization.py::PluginsTest::test_write_http_methods_are_rejected_if_readonly",
"tests/plugins/test_statsd.py::StatsDMissing::test_client_instantiation_raises_properly"
] | [] | Apache License 2.0 | 20,456 | 1,027 | [
"kinto/core/initialization.py",
"kinto/core/resource/__init__.py",
"kinto/plugins/statsd.py"
] |
|
ZeroTwentyFifty__pact_methodology-51 | 122fd07a0c956b4f9ba3a14a6cce32ea46b0e4fa | 2024-12-11 09:43:09 | 2888c8b3b6a3befe622dfab19a3b87bc6b8e3a77 | codecov[bot]: ## [Codecov](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty) Report
All modified and coverable lines are covered by tests :white_check_mark:
| [Files with missing lines](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?dropdown=coverage&src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty) | Coverage Δ | |
|---|---|---|
| [pact\_methodology/assurance/assurance.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=pact_methodology%2Fassurance%2Fassurance.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-cGFjdF9tZXRob2RvbG9neS9hc3N1cmFuY2UvYXNzdXJhbmNlLnB5) | `98.38% <100.00%> (+0.23%)` | :arrow_up: |
| [...t\_methodology/carbon\_footprint/carbon\_footprint.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=pact_methodology%2Fcarbon_footprint%2Fcarbon_footprint.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-cGFjdF9tZXRob2RvbG9neS9jYXJib25fZm9vdHByaW50L2NhcmJvbl9mb290cHJpbnQucHk=) | `98.99% <100.00%> (+0.02%)` | :arrow_up: |
| [...arbon\_footprint/product\_or\_sector\_specific\_rule.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=pact_methodology%2Fcarbon_footprint%2Fproduct_or_sector_specific_rule.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-cGFjdF9tZXRob2RvbG9neS9jYXJib25fZm9vdHByaW50L3Byb2R1Y3Rfb3Jfc2VjdG9yX3NwZWNpZmljX3J1bGUucHk=) | `97.56% <ø> (ø)` | |
| [tests/assurance/test\_assurance.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=tests%2Fassurance%2Ftest_assurance.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-dGVzdHMvYXNzdXJhbmNlL3Rlc3RfYXNzdXJhbmNlLnB5) | `100.00% <100.00%> (ø)` | |
| [tests/carbon\_footprint/test\_carbon\_footprint.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=tests%2Fcarbon_footprint%2Ftest_carbon_footprint.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-dGVzdHMvY2FyYm9uX2Zvb3RwcmludC90ZXN0X2NhcmJvbl9mb290cHJpbnQucHk=) | `99.66% <100.00%> (+0.01%)` | :arrow_up: |
| [...\_footprint/test\_product\_or\_sector\_specific\_rule.py](https://app.codecov.io/gh/ZeroTwentyFifty/pact_methodology/pull/51?src=pr&el=tree&filepath=tests%2Fcarbon_footprint%2Ftest_product_or_sector_specific_rule.py&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=ZeroTwentyFifty#diff-dGVzdHMvY2FyYm9uX2Zvb3RwcmludC90ZXN0X3Byb2R1Y3Rfb3Jfc2VjdG9yX3NwZWNpZmljX3J1bGUucHk=) | `100.00% <100.00%> (ø)` | |
| diff --git a/pact_methodology/assurance/assurance.py b/pact_methodology/assurance/assurance.py
index aecf469..8aa4921 100644
--- a/pact_methodology/assurance/assurance.py
+++ b/pact_methodology/assurance/assurance.py
@@ -9,23 +9,32 @@ class Coverage(str, Enum):
PRODUCT_LINE = "product line"
PRODUCT_LEVEL = "product level"
+ def __str__(self):
+ return self.value
+
class Level(str, Enum):
LIMITED = "limited"
REASONABLE = "reasonable"
+ def __str__(self):
+ return self.value
+
class Boundary(str, Enum):
GATE_TO_GATE = "Gate-to-Gate"
CRADLE_TO_GATE = "Cradle-to-Gate"
+ def __str__(self):
+ return self.value
+
class Assurance:
"""
Represents an assurance in conformance with Pathfinder Framework chapter 5 and appendix B.
Args:
- assurance (bool): A boolean flag indicating whether the CarbonFootprint has been assured in line with Pathfinder Framework requirements (section 5).
+ assurance (bool): A boolean flag indicating whether the CarbonFootprint has been assured in line with PACT Methodology requirements (section 5).
provider_name (str): The non-empty name of the independent third party engaged to undertake the assurance.
coverage (Coverage | None): Level of granularity of the emissions data assured.
level (Level | None): Level of assurance applicable to the PCF.
@@ -35,7 +44,7 @@ class Assurance:
comments (str | None): Any additional comments that will clarify the interpretation of the assurance.
Attributes:
- assurance (bool): A boolean flag indicating whether the CarbonFootprint has been assured in line with Pathfinder Framework requirements (section 5).
+ assurance (bool): A boolean flag indicating whether the CarbonFootprint has been assured in line with PACT Methodology requirements (section 5).
provider_name (str): The non-empty name of the independent third party engaged to undertake the assurance.
coverage (Coverage | None): Level of granularity of the emissions data assured.
level (Level | None): Level of assurance applicable to the PCF.
@@ -110,3 +119,16 @@ class Assurance:
assurance_dict["comments"] = self.comments
return assurance_dict
+
+ def __str__(self):
+ return (
+ f"Assurance("
+ f"assurance={self.assurance}, "
+ f"provider_name='{self.provider_name}', "
+ f"coverage={self.coverage}, "
+ f"level={self.level}, "
+ f"boundary={self.boundary}, "
+ f"completed_at={self.completed_at}, "
+ f"standard_name='{self.standard_name}', "
+ f"comments='{self.comments}')"
+ )
\ No newline at end of file
diff --git a/pact_methodology/carbon_footprint/carbon_footprint.py b/pact_methodology/carbon_footprint/carbon_footprint.py
index e61648c..58fee31 100644
--- a/pact_methodology/carbon_footprint/carbon_footprint.py
+++ b/pact_methodology/carbon_footprint/carbon_footprint.py
@@ -448,3 +448,79 @@ class CarbonFootprint:
if value is not None and not all(isinstance(rule, ProductOrSectorSpecificRule) for rule in value):
raise ValueError("product_or_sector_specific_rules must be a list of ProductOrSectorSpecificRule")
self._product_or_sector_specific_rules = value
+
+ def __str__(self):
+ return (
+ f"CarbonFootprint("
+ f"declared_unit={self.declared_unit}, "
+ f"unitary_product_amount={self.unitary_product_amount}, "
+ f"p_cf_excluding_biogenic={self.p_cf_excluding_biogenic}, "
+ f"p_cf_including_biogenic={self.p_cf_including_biogenic}, "
+ f"fossil_ghg_emissions={self.fossil_ghg_emissions}, "
+ f"fossil_carbon_content={self.fossil_carbon_content}, "
+ f"biogenic_carbon_content={self.biogenic_carbon_content}, "
+ f"characterization_factors={self.characterization_factors.value}, "
+ f"ipcc_characterization_factors_sources={self.ipcc_characterization_factors_sources}, "
+ f"cross_sectoral_standards_used={[str(standard) for standard in self.cross_sectoral_standards_used]}, "
+ f"boundary_processes_description='{self.boundary_processes_description}', "
+ f"exempted_emissions_percent={self.exempted_emissions_percent}, "
+ f"exempted_emissions_description='{self.exempted_emissions_description}', "
+ f"reference_period={self.reference_period}, "
+ f"packaging_emissions_included={self.packaging_emissions_included}, "
+ f"geographical_scope={self.geographical_scope}, "
+ f"primary_data_share={self.primary_data_share}, "
+ f"dqi={self.dqi}, "
+ f"d_luc_ghg_emissions={self.d_luc_ghg_emissions}, "
+ f"land_management_ghg_emissions={self.land_management_ghg_emissions}, "
+ f"other_biogenic_ghg_emissions={self.other_biogenic_ghg_emissions}, "
+ f"biogenic_carbon_withdrawal={self.biogenic_carbon_withdrawal}, "
+ f"iluc_ghg_emissions={self.iluc_ghg_emissions}, "
+ f"aircraft_ghg_emissions={self.aircraft_ghg_emissions}, "
+ f"packaging_ghg_emissions={self.packaging_ghg_emissions}, "
+ f"allocation_rules_description='{self.allocation_rules_description}', "
+ f"uncertainty_assessment_description='{self.uncertainty_assessment_description}', "
+ f"assurance={self.assurance}, "
+ f"biogenic_accounting_methodology={self.biogenic_accounting_methodology.value}, "
+ f"product_or_sector_specific_rules={[str(rule) for rule in self.product_or_sector_specific_rules]})"
+ )
+
+ def __repr__(self):
+ return (
+ f"CarbonFootprint("
+ f"declared_unit={self.declared_unit!r}, "
+ f"unitary_product_amount={self.unitary_product_amount!r}, "
+ f"p_cf_excluding_biogenic={self.p_cf_excluding_biogenic!r}, "
+ f"p_cf_including_biogenic={self.p_cf_including_biogenic!r}, "
+ f"fossil_ghg_emissions={self.fossil_ghg_emissions!r}, "
+ f"fossil_carbon_content={self.fossil_carbon_content!r}, "
+ f"biogenic_carbon_content={self.biogenic_carbon_content!r}, "
+ f"characterization_factors={self.characterization_factors!r}, "
+ f"ipcc_characterization_factors_sources={self.ipcc_characterization_factors_sources!r}, "
+ f"cross_sectoral_standards_used={self.cross_sectoral_standards_used!r}, "
+ f"boundary_processes_description={self.boundary_processes_description!r}, "
+ f"exempted_emissions_percent={self.exempted_emissions_percent!r}, "
+ f"exempted_emissions_description={self.exempted_emissions_description!r}, "
+ f"reference_period={self.reference_period!r}, "
+ f"packaging_emissions_included={self.packaging_emissions_included!r}, "
+ f"geographical_scope={self.geographical_scope!r}, "
+ f"primary_data_share={self.primary_data_share!r}, "
+ f"dqi={self.dqi!r}, "
+ f"d_luc_ghg_emissions={self.d_luc_ghg_emissions!r}, "
+ f"land_management_ghg_emissions={self.land_management_ghg_emissions!r}, "
+ f"other_biogenic_ghg_emissions={self.other_biogenic_ghg_emissions!r}, "
+ f"biogenic_carbon_withdrawal={self.biogenic_carbon_withdrawal!r}, "
+ f"iluc_ghg_emissions={self.iluc_ghg_emissions!r}, "
+ f"aircraft_ghg_emissions={self.aircraft_ghg_emissions!r}, "
+ f"packaging_ghg_emissions={self.packaging_ghg_emissions!r}, "
+ f"allocation_rules_description={self.allocation_rules_description!r}, "
+ f"uncertainty_assessment_description={self.uncertainty_assessment_description!r}, "
+ f"assurance={self.assurance!r}, "
+ f"biogenic_accounting_methodology={self.biogenic_accounting_methodology!r}, "
+ f"product_or_sector_specific_rules={self.product_or_sector_specific_rules!r})"
+ )
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
diff --git a/pact_methodology/carbon_footprint/product_or_sector_specific_rule.py b/pact_methodology/carbon_footprint/product_or_sector_specific_rule.py
index 0e49b5b..9724d16 100644
--- a/pact_methodology/carbon_footprint/product_or_sector_specific_rule.py
+++ b/pact_methodology/carbon_footprint/product_or_sector_specific_rule.py
@@ -170,7 +170,7 @@ class ProductOrSectorSpecificRule:
A human-readable string showing the rule's attributes
"""
return (
- f"[operator={self.operator}, rule_names={self.rule_names}, other_operator_name={self.other_operator_name}]"
+ f"operator={self.operator}, rule_names={self.rule_names}, other_operator_name={self.other_operator_name}"
)
def __repr__(self) -> str:
| Expand CarbonFootprint object data model.
The `CarbonFootprint` class does not implement a full python data model, making interactions with the instance a bit of an annoyance, naturally there are automatically defined dunder methods but a more explicit approach is preferable.
This tickets does not need to implement every dunder method in the object model, but the core/usual suspects would be preferable even if it's just to set the tone and standard for future developments.
A list of what would be desirable is:
- [ ] dict
- [ ] str
- [ ] repr | ZeroTwentyFifty/pact_methodology | diff --git a/tests/assurance/test_assurance.py b/tests/assurance/test_assurance.py
index ec1dc0e..6ac9159 100644
--- a/tests/assurance/test_assurance.py
+++ b/tests/assurance/test_assurance.py
@@ -146,3 +146,26 @@ def test_assurance_init_with_invalid_comments(comments):
with pytest.raises(ValueError) as excinfo:
Assurance(True, "My Auditor", comments=comments)
assert str(excinfo.value) == "comments must be a string"
+
+def test_assurance_str():
+ assurance = Assurance(
+ assurance=True,
+ provider_name="Example Provider",
+ coverage=Coverage.PCF_SYSTEM,
+ level=Level.REASONABLE,
+ boundary=Boundary.GATE_TO_GATE,
+ completed_at=DateTime("2023-01-01T00:00:00Z"),
+ standard_name="Example Standard",
+ comments="Example comments",
+ )
+ assert str(assurance) == (
+ f"Assurance("
+ f"assurance=True, "
+ f"provider_name='Example Provider', "
+ f"coverage=PCF system, "
+ f"level=reasonable, "
+ f"boundary=Gate-to-Gate, "
+ f"completed_at=2023-01-01T00:00:00Z, "
+ f"standard_name='Example Standard', "
+ f"comments='Example comments')"
+ )
\ No newline at end of file
diff --git a/tests/carbon_footprint/test_carbon_footprint.py b/tests/carbon_footprint/test_carbon_footprint.py
index 5a67ae2..441fde1 100644
--- a/tests/carbon_footprint/test_carbon_footprint.py
+++ b/tests/carbon_footprint/test_carbon_footprint.py
@@ -21,7 +21,7 @@ from pact_methodology.carbon_footprint.geographical_scope import (
GeographicalGranularity,
)
from pact_methodology.data_quality_indicators.data_quality_indicators import (
- DataQualityIndicators,
+ DataQualityIndicators, DataQualityRating
)
from pact_methodology.carbon_footprint.biogenic_accounting_methodology import BiogenicAccountingMethodology
from pact_methodology.carbon_footprint.product_or_sector_specific_rule import ProductOrSectorSpecificRule
@@ -44,7 +44,7 @@ def valid_carbon_footprint_data():
"boundary_processes_description": "boundary processes description",
"exempted_emissions_percent": 1.0,
"exempted_emissions_description": "Rationale for exclusion",
- "reference_period": ReferencePeriod(start=DateTime.now(), end=DateTime.now()),
+ "reference_period": ReferencePeriod(start=DateTime("2022-01-01T00:00:00Z"), end=DateTime("2022-12-31T23:59:59Z")),
"packaging_emissions_included": True,
"geographical_scope": CarbonFootprintGeographicalScope(
global_scope=True,
@@ -54,7 +54,13 @@ def valid_carbon_footprint_data():
),
"primary_data_share": 50.0,
"dqi": DataQualityIndicators(
- reference_period=ReferencePeriod(start=DateTime.now(), end=DateTime.now())
+ reference_period=ReferencePeriod(start=DateTime("2022-01-01T00:00:00Z"), end=DateTime("2022-12-31T23:59:59Z")),
+ coverage_percent=80.0,
+ technological_dqr=DataQualityRating(2),
+ temporal_dqr=DataQualityRating(2),
+ geographical_dqr=DataQualityRating(1),
+ completeness_dqr=DataQualityRating(2),
+ reliability_dqr=DataQualityRating(3),
),
"d_luc_ghg_emissions": 2,
"land_management_ghg_emissions": 1.0,
@@ -798,3 +804,93 @@ def test_carbon_footprint_invalid_product_or_sector_specific_rules(
with pytest.raises(ValueError) as excinfo:
CarbonFootprint(**invalid_data)
assert str(excinfo.value) == expected_error
+
+def test_carbon_footprint_str(valid_carbon_footprint_data):
+ carbon_footprint = CarbonFootprint(**valid_carbon_footprint_data)
+ assert str(carbon_footprint) == (
+ f"CarbonFootprint("
+ f"declared_unit=kilogram, "
+ f"unitary_product_amount=1.0, "
+ f"p_cf_excluding_biogenic=0.5, "
+ f"p_cf_including_biogenic=2, "
+ f"fossil_ghg_emissions=0.3, "
+ f"fossil_carbon_content=0.2, "
+ f"biogenic_carbon_content=0.1, "
+ f"characterization_factors=AR6, "
+ f"ipcc_characterization_factors_sources=['AR6'], "
+ f"cross_sectoral_standards_used=['GHG Protocol Product standard'], "
+ f"boundary_processes_description='boundary processes description', "
+ f"exempted_emissions_percent=1.0, "
+ f"exempted_emissions_description='Rationale for exclusion', "
+ f"reference_period=ReferencePeriod(start=2022-01-01T00:00:00Z, end=2022-12-31T23:59:59Z), "
+ f"packaging_emissions_included=True, "
+ f"geographical_scope=Geographical scope: Global (at Global level), "
+ f"primary_data_share=50.0, "
+ f"dqi=DataQualityIndicators("
+ f"reference_period=ReferencePeriod(start=2022-01-01T00:00:00Z, end=2022-12-31T23:59:59Z), "
+ f"coverage_percent=80.0, "
+ f"technological_dqr=2, "
+ f"temporal_dqr=2, "
+ f"geographical_dqr=1, "
+ f"completeness_dqr=2, "
+ f"reliability_dqr=3), "
+ f"d_luc_ghg_emissions=2, "
+ f"land_management_ghg_emissions=1.0, "
+ f"other_biogenic_ghg_emissions=1.5, "
+ f"biogenic_carbon_withdrawal=-1.0, "
+ f"iluc_ghg_emissions=1.0, "
+ f"aircraft_ghg_emissions=1.0, "
+ f"packaging_ghg_emissions=1.0, "
+ f"allocation_rules_description='Example allocation rules description', "
+ f"uncertainty_assessment_description='Example uncertainty assessment description', "
+ f"assurance=Assurance(assurance=True, provider_name='Example provider name', coverage=PCF system, "
+ f"level=reasonable, boundary=Gate-to-Gate, completed_at={carbon_footprint.assurance.completed_at}, "
+ f"standard_name='Example standard name', comments='Example comments'), "
+ f"biogenic_accounting_methodology=GHGP, "
+ f"product_or_sector_specific_rules=[\"operator=Other, rule_names=['Rule1'], other_operator_name=Custom Operator\"])"
+ )
+
+def test_carbon_footprint_repr(valid_carbon_footprint_data):
+ carbon_footprint = CarbonFootprint(**valid_carbon_footprint_data)
+ assert repr(carbon_footprint) == (
+ f"CarbonFootprint(declared_unit={carbon_footprint.declared_unit!r}, "
+ f"unitary_product_amount={carbon_footprint.unitary_product_amount!r}, "
+ f"p_cf_excluding_biogenic={carbon_footprint.p_cf_excluding_biogenic!r}, "
+ f"p_cf_including_biogenic={carbon_footprint.p_cf_including_biogenic!r}, "
+ f"fossil_ghg_emissions={carbon_footprint.fossil_ghg_emissions!r}, "
+ f"fossil_carbon_content={carbon_footprint.fossil_carbon_content!r}, "
+ f"biogenic_carbon_content={carbon_footprint.biogenic_carbon_content!r}, "
+ f"characterization_factors={carbon_footprint.characterization_factors!r}, "
+ f"ipcc_characterization_factors_sources={carbon_footprint.ipcc_characterization_factors_sources!r}, "
+ f"cross_sectoral_standards_used={carbon_footprint.cross_sectoral_standards_used!r}, "
+ f"boundary_processes_description={carbon_footprint.boundary_processes_description!r}, "
+ f"exempted_emissions_percent={carbon_footprint.exempted_emissions_percent!r}, "
+ f"exempted_emissions_description={carbon_footprint.exempted_emissions_description!r}, "
+ f"reference_period={carbon_footprint.reference_period!r}, "
+ f"packaging_emissions_included={carbon_footprint.packaging_emissions_included!r}, "
+ f"geographical_scope={carbon_footprint.geographical_scope!r}, "
+ f"primary_data_share={carbon_footprint.primary_data_share!r}, "
+ f"dqi={carbon_footprint.dqi!r}, "
+ f"d_luc_ghg_emissions={carbon_footprint.d_luc_ghg_emissions!r}, "
+ f"land_management_ghg_emissions={carbon_footprint.land_management_ghg_emissions!r}, "
+ f"other_biogenic_ghg_emissions={carbon_footprint.other_biogenic_ghg_emissions!r}, "
+ f"biogenic_carbon_withdrawal={carbon_footprint.biogenic_carbon_withdrawal!r}, "
+ f"iluc_ghg_emissions={carbon_footprint.iluc_ghg_emissions!r}, "
+ f"aircraft_ghg_emissions={carbon_footprint.aircraft_ghg_emissions!r}, "
+ f"packaging_ghg_emissions={carbon_footprint.packaging_ghg_emissions!r}, "
+ f"allocation_rules_description={carbon_footprint.allocation_rules_description!r}, "
+ f"uncertainty_assessment_description={carbon_footprint.uncertainty_assessment_description!r}, "
+ f"assurance={carbon_footprint.assurance!r}, "
+ f"biogenic_accounting_methodology={carbon_footprint.biogenic_accounting_methodology!r}, "
+ f"product_or_sector_specific_rules={carbon_footprint.product_or_sector_specific_rules!r})"
+ )
+
+def test_carbon_footprint_eq(valid_carbon_footprint_data):
+ carbon_footprint1 = CarbonFootprint(**valid_carbon_footprint_data)
+ carbon_footprint2 = CarbonFootprint(**valid_carbon_footprint_data)
+ assert carbon_footprint1 == carbon_footprint2
+
+def test_carbon_footprint_not_eq(valid_carbon_footprint_data):
+ carbon_footprint1 = CarbonFootprint(**valid_carbon_footprint_data)
+ carbon_footprint2 = CarbonFootprint(**{**valid_carbon_footprint_data, "unitary_product_amount": 2.0})
+ assert carbon_footprint1 != carbon_footprint2
diff --git a/tests/carbon_footprint/test_product_or_sector_specific_rule.py b/tests/carbon_footprint/test_product_or_sector_specific_rule.py
index 40d7b3f..b62e98c 100644
--- a/tests/carbon_footprint/test_product_or_sector_specific_rule.py
+++ b/tests/carbon_footprint/test_product_or_sector_specific_rule.py
@@ -84,7 +84,7 @@ def test_product_or_sector_specific_rule_to_dict(valid_product_or_sector_specifi
def test_product_or_sector_specific_rule_str(valid_product_or_sector_specific_rule):
rule = ProductOrSectorSpecificRule(**valid_product_or_sector_specific_rule)
- expected_str = "[operator=Other, rule_names=['Rule1'], other_operator_name=Custom Operator]"
+ expected_str = "operator=Other, rule_names=['Rule1'], other_operator_name=Custom Operator"
assert str(rule) == expected_str
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.11",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | casregnum==1.0.1
coverage==7.8.0
execnet==2.1.1
iniconfig==2.1.0
packaging==24.2
-e git+https://github.com/ZeroTwentyFifty/pact_methodology.git@122fd07a0c956b4f9ba3a14a6cce32ea46b0e4fa#egg=pact_methodology
pluggy==1.5.0
pycountry==23.12.11
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
six==1.17.0
typing_extensions==4.13.0
urnparse==0.2.2
| name: pact_methodology
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py311h06a4308_0
- python=3.11.11=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py311h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py311h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- casregnum==1.0.1
- coverage==7.8.0
- execnet==2.1.1
- iniconfig==2.1.0
- packaging==24.2
- pact-methodology==0.0.2
- pluggy==1.5.0
- pycountry==23.12.11
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- six==1.17.0
- typing-extensions==4.13.0
- urnparse==0.2.2
prefix: /opt/conda/envs/pact_methodology
| [
"tests/assurance/test_assurance.py::test_assurance_str",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_str",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_repr",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_eq",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_str"
] | [] | [
"tests/assurance/test_assurance.py::test_assurance_init",
"tests/assurance/test_assurance.py::test_assurance_to_dict",
"tests/assurance/test_assurance.py::test_coverage_enum",
"tests/assurance/test_assurance.py::test_level_enum",
"tests/assurance/test_assurance.py::test_boundary_enum",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_coverage",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_level",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_boundary",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_enum_values",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_assurance[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_assurance[False]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_assurance[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_assurance[1]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_assurance[None]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_provider_name[My",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_provider_name[Your",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_provider_name[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_provider_name[1]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_provider_name[None]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_completed_at[completed_at0]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_completed_at[completed_at1]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_completed_at[None]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_completed_at[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_completed_at[1]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_standard_name[ISO",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_standard_name[Another",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_standard_name[None]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_standard_name[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_standard_name[1]",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_comments[Some",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_comments[More",
"tests/assurance/test_assurance.py::test_assurance_init_with_valid_comments[None]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_comments[True]",
"tests/assurance/test_assurance.py::test_assurance_init_with_invalid_comments[1]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_exists",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_instantiation",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attributes",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_declared_unit",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_unitary_product_amount",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_p_cf_excluding_biogenic",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_fossil_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_fossil_carbon_content",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_biogenic_carbon_content",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_characterization_factors",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_ipcc_characterization_factors_sources",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_cross_sectoral_standards_used",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_boundary_processes_description",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_exempted_emissions_percent",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_exempted_emissions_description",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_reference_period",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_packaging_emissions_included",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_packaging_emissions_included",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_attribute[p_cf_including_biogenic-1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_attribute[d_luc_ghg_emissions-1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_attribute[land_management_ghg_emissions-1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_attribute[other_biogenic_ghg_emissions-1.5]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_attribute[biogenic_carbon_withdrawal--1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[p_cf_including_biogenic-not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[d_luc_ghg_emissions-not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[land_management_ghg_emissions-not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[other_biogenic_ghg_emissions-not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[other_biogenic_ghg_emissions--1-other_biogenic_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[biogenic_carbon_withdrawal-not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_attribute_type[biogenic_carbon_withdrawal-1-biogenic_carbon_withdrawal",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[p_cf_including_biogenic]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[d_luc_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[land_management_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[other_biogenic_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[biogenic_carbon_withdrawal]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_attribute_optional_before_2025[biogenic_accounting_methodology]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[p_cf_including_biogenic]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[d_luc_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[land_management_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[other_biogenic_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[biogenic_carbon_withdrawal]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_valid_before_2025[biogenic_accounting_methodology]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[p_cf_including_biogenic]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[d_luc_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[land_management_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[other_biogenic_ghg_emissions]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[biogenic_carbon_withdrawal]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_missing_attributes_invalid_after_2025[biogenic_accounting_methodology]",
"tests/carbon_footprint/test_carbon_footprint.py::test_primary_data_share_optional_before_2025",
"tests/carbon_footprint/test_carbon_footprint.py::test_primary_data_share_required_after_2025",
"tests/carbon_footprint/test_carbon_footprint.py::test_dqi_optional_before_2025",
"tests/carbon_footprint/test_carbon_footprint.py::test_dqi_required_after_2025",
"tests/carbon_footprint/test_carbon_footprint.py::test_primary_data_share_or_dqi_required_before_2025",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_geographical_scope_type",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_geographical_scope",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_geographical_scope_attributes",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[1]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[100]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[0.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_iluc_ghg_emissions[100.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_iluc_ghg_emissions[-1.0-iluc_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_iluc_ghg_emissions[not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[1]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[100]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[0.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[1.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_aircraft_ghg_emissions[100.0]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_aircraft_ghg_emissions[-1.0-aircraft_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_aircraft_ghg_emissions[not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_packaging_ghg_emissions[0.0-True]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_packaging_ghg_emissions[1.0-True]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_packaging_ghg_emissions[100.0-True]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_packaging_ghg_emissions[None-False]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_packaging_ghg_emissions[-1.0-True-packaging_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_packaging_ghg_emissions[not",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_packaging_ghg_emissions[1.0-False-packaging_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_packaging_ghg_emissions[None-True-packaging_ghg_emissions",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_allocation_rules_description[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_allocation_rules_description[Example",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_allocation_rules_description[1-allocation_rules_description",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_uncertainty_assessment_description[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_uncertainty_assessment_description[uncertainty",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_uncertainty_assessment_description[1-uncertainty_assessment_description",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_assurance[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_assurance[assurance1]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_assurance[1-assurance",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_product_or_sector_specific_rules[None]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_valid_product_or_sector_specific_rules[product_or_sector_specific_rules1]",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_invalid_product_or_sector_specific_rules[product_or_sector_specific_rules0-product_or_sector_specific_rules",
"tests/carbon_footprint/test_carbon_footprint.py::test_carbon_footprint_not_eq",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_valid",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_invalid_operator",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_invalid_rule_names",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_missing_operator",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_missing_rule_names",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_other_operator_name_when_operator_not_other",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_init_valid_rule_names",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_to_dict",
"tests/carbon_footprint/test_product_or_sector_specific_rule.py::test_product_or_sector_specific_rule_repr"
] | [] | MIT License | 20,461 | 2,277 | [
"pact_methodology/assurance/assurance.py",
"pact_methodology/carbon_footprint/carbon_footprint.py",
"pact_methodology/carbon_footprint/product_or_sector_specific_rule.py"
] |
probabl-ai__skore-914 | 115c7a2d832e27ef284a592e92fdc72c0abebe17 | 2024-12-11 11:23:44 | d59781c5cf324845762a287385d40a634347b7c2 | github-actions[bot]: <!-- Pytest Coverage Comment: coverage-skore -->
<a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/README.md"><img alt="Coverage" src="https://img.shields.io/badge/Coverage-91%25-brightgreen.svg" /></a><details><summary>pytest coverage report </summary><table><tr><th>File</th><th>Stmts</th><th>Miss</th><th>Cover</th><th>Missing</th></tr><tbody><tr><td colspan="5"><b>src/skore</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/__init__.py">\_\_init\_\_.py</a></td><td>18</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/__main__.py">\_\_main\_\_.py</a></td><td>8</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/__main__.py#L 80%"> 80%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/exceptions.py">exceptions.py</a></td><td>4</td><td>0</td><td>100%</td><td> </td></tr><tr><td colspan="5"><b>src/skore/cli</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/__init__.py">\_\_init\_\_.py</a></td><td>8</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/cli.py">cli.py</a></td><td>32</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/launch_dashboard.py">launch_dashboard.py</a></td><td>22</td><td>12</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/launch_dashboard.py#L 42%"> 42%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/quickstart_command.py">quickstart_command.py</a></td><td>12</td><td>2</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/cli/quickstart_command.py#L 83%"> 83%</a></td></tr><tr><td colspan="5"><b>src/skore/item</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/__init__.py">\_\_init\_\_.py</a></td><td>22</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/cross_validation_aggregation_item.py">cross_validation_aggregation_item.py</a></td><td>54</td><td>1</td><td>3</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/cross_validation_aggregation_item.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/cross_validation_item.py">cross_validation_item.py</a></td><td>48</td><td>1</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/cross_validation_item.py#L 98%"> 98%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/item.py">item.py</a></td><td>22</td><td>1</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/item.py#L 95%"> 95%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/item_repository.py">item_repository.py</a></td><td>43</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/item_repository.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/media_item.py">media_item.py</a></td><td>60</td><td>4</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/media_item.py#L 93%"> 93%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/numpy_array_item.py">numpy_array_item.py</a></td><td>21</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/numpy_array_item.py#L 92%"> 92%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/pandas_dataframe_item.py">pandas_dataframe_item.py</a></td><td>30</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/pandas_dataframe_item.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/pandas_series_item.py">pandas_series_item.py</a></td><td>30</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/pandas_series_item.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/polars_dataframe_item.py">polars_dataframe_item.py</a></td><td>28</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/polars_dataframe_item.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/polars_series_item.py">polars_series_item.py</a></td><td>23</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/polars_series_item.py#L 93%"> 93%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/primitive_item.py">primitive_item.py</a></td><td>23</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/primitive_item.py#L 91%"> 91%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/sklearn_base_estimator_item.py">sklearn_base_estimator_item.py</a></td><td>29</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/sklearn_base_estimator_item.py#L 94%"> 94%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/skrub_table_report_item.py">skrub_table_report_item.py</a></td><td>10</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/item/skrub_table_report_item.py#L 86%"> 86%</a></td></tr><tr><td colspan="5"><b>src/skore/persistence</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/__init__.py">\_\_init\_\_.py</a></td><td>0</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/abstract_storage.py">abstract_storage.py</a></td><td>22</td><td>1</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/abstract_storage.py#L 95%"> 95%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/disk_cache_storage.py">disk_cache_storage.py</a></td><td>33</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/disk_cache_storage.py#L 95%"> 95%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/persistence/in_memory_storage.py">in_memory_storage.py</a></td><td>20</td><td>0</td><td>100%</td><td> </td></tr><tr><td colspan="5"><b>src/skore/project</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/__init__.py">\_\_init\_\_.py</a></td><td>4</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/create.py">create.py</a></td><td>50</td><td>8</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/create.py#L 88%"> 88%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/load.py">load.py</a></td><td>23</td><td>3</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/load.py#L 89%"> 89%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/project.py">project.py</a></td><td>62</td><td>4</td><td>4</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/project/project.py#L 91%"> 91%</a></td></tr><tr><td colspan="5"><b>src/skore/sklearn</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/__init__.py">\_\_init\_\_.py</a></td><td>3</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/find_ml_task.py">find_ml_task.py</a></td><td>19</td><td>2</td><td>3</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/find_ml_task.py#L 85%"> 85%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/types.py">types.py</a></td><td>2</td><td>0</td><td>100%</td><td> </td></tr><tr><td colspan="5"><b>src/skore/sklearn/cross_validation</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/__init__.py">\_\_init\_\_.py</a></td><td>2</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/cross_validation_helpers.py">cross_validation_helpers.py</a></td><td>40</td><td>4</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/cross_validation_helpers.py#L 89%"> 89%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/cross_validation_reporter.py">cross_validation_reporter.py</a></td><td>27</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/cross_validation_reporter.py#L 90%"> 90%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/plot.py">plot.py</a></td><td>38</td><td>1</td><td>3</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/cross_validation/plot.py#L 92%"> 92%</a></td></tr><tr><td colspan="5"><b>src/skore/sklearn/train_test_split</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/__init__.py">\_\_init\_\_.py</a></td><td>0</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/train_test_split.py">train_test_split.py</a></td><td>34</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/train_test_split.py#L 94%"> 94%</a></td></tr><tr><td colspan="5"><b>src/skore/sklearn/train_test_split/warning</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/__init__.py">\_\_init\_\_.py</a></td><td>8</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/high_class_imbalance_too_few_examples_warning.py">high_class_imbalance_too_few_examples_warning.py</a></td><td>17</td><td>3</td><td>2</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/high_class_imbalance_too_few_examples_warning.py#L 78%"> 78%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/high_class_imbalance_warning.py">high_class_imbalance_warning.py</a></td><td>18</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/high_class_imbalance_warning.py#L 88%"> 88%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/random_state_unset_warning.py">random_state_unset_warning.py</a></td><td>11</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/random_state_unset_warning.py#L 87%"> 87%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/shuffle_true_warning.py">shuffle_true_warning.py</a></td><td>9</td><td>0</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/shuffle_true_warning.py#L 91%"> 91%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/stratify_is_set_warning.py">stratify_is_set_warning.py</a></td><td>11</td><td>1</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/stratify_is_set_warning.py#L 87%"> 87%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/time_based_column_warning.py">time_based_column_warning.py</a></td><td>22</td><td>1</td><td>2</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/time_based_column_warning.py#L 89%"> 89%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/train_test_split_warning.py">train_test_split_warning.py</a></td><td>5</td><td>1</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/sklearn/train_test_split/warning/train_test_split_warning.py#L 80%"> 80%</a></td></tr><tr><td colspan="5"><b>src/skore/ui</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/__init__.py">\_\_init\_\_.py</a></td><td>0</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/app.py">app.py</a></td><td>25</td><td>5</td><td>2</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/app.py#L 71%"> 71%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/dependencies.py">dependencies.py</a></td><td>7</td><td>1</td><td>0</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/dependencies.py#L 86%"> 86%</a></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/project_routes.py">project_routes.py</a></td><td>85</td><td>5</td><td>2</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/ui/project_routes.py#L 92%"> 92%</a></td></tr><tr><td colspan="5"><b>src/skore/utils</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/utils/__init__.py">\_\_init\_\_.py</a></td><td>0</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/utils/_show_versions.py">_show_versions.py</a></td><td>29</td><td>0</td><td>100%</td><td> </td></tr><tr><td colspan="5"><b>src/skore/view</b></td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/view/__init__.py">\_\_init\_\_.py</a></td><td>0</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/view/view.py">view.py</a></td><td>5</td><td>0</td><td>100%</td><td> </td></tr><tr><td> <a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/view/view_repository.py">view_repository.py</a></td><td>16</td><td>2</td><td>1</td><td><a href="https://github.com/probabl-ai/skore/blob/03d0d6c21c0494deffa2f6e0b9fc331b577455b6/src/skore/view/view_repository.py#L 83%"> 83%</a></td></tr><tr><td><b>TOTAL</b></td><td><b>1194</b></td><td><b>83</b></td><td><b>91%</b></td><td> </td></tr></tbody></table></details>
| Tests | Skipped | Failures | Errors | Time |
| ----- | ------- | -------- | -------- | ------------------ |
| 172 | 0 :zzz: | 0 :x: | 0 :fire: | 39.198s :stopwatch: |
| diff --git a/skore/src/skore/item/cross_validation_aggregation_item.py b/skore/src/skore/item/cross_validation_aggregation_item.py
index adfb036..ae29ae9 100644
--- a/skore/src/skore/item/cross_validation_aggregation_item.py
+++ b/skore/src/skore/item/cross_validation_aggregation_item.py
@@ -5,6 +5,7 @@ This class represents the aggregation of several cross-validation runs.
from __future__ import annotations
+import contextlib
from functools import cached_property
import plotly.graph_objects
@@ -36,10 +37,19 @@ def plot_cross_validation_aggregation(
_cv_results = cv_results_items_versions.copy()
- df = pandas.DataFrame([v.cv_results_serialized for v in _cv_results])
- df = df.drop(columns=["indices", "estimator"], errors="ignore")
- df = df.apply(pandas.Series.explode)
- df = df.reset_index(names="run_number")
+ cv_list = [v.cv_results_serialized for v in _cv_results]
+ df_list = []
+ for run, cv in enumerate(cv_list):
+ for key in ["indices","estimator"]:
+ with contextlib.suppress(KeyError):
+ del cv[key]
+
+ df_run = pandas.DataFrame(cv_list[run])
+ df_run = df_run.reset_index(names="fold_number")
+ df_run["run_number"] = run
+ df_list.append(df_run)
+
+ df = pandas.concat(df_list)
# Move time columns to last and "test_score" to first
if "fit_time" in df.columns:
| Adding extra scores
### Describe the bug
I am using [this guide](https://scikit-learn.org/dev/modules/model_evaluation.html#r2-score) to figure out extra metric names for `cross_validate`. However, when I run this:
```python
from skore import cross_validate
cross_validate(HistGradientBoostingRegressor(), X, y, cv=5, project=my_project, scoring=["neg_mean_absolute_error", "r2", "explained_variance"])
```
Then I see this:
```python
Traceback (most recent call last):
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/marimo/_runtime/executor.py", line 158, in execute_cell
return eval(cell.last_expr, glbls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Cell marimo:///workspaces/solah/overview.py#cell=cell-9, line 3, in <module>
cross_validate(HistGradientBoostingRegressor(), X, y, cv=5, project=my_project, scoring=["neg_mean_absolute_error", "r2", "explained_variance"])
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/skore/sklearn/cross_validate.py", line 268, in cross_validate
agg_cross_validation_item = CrossValidationAggregationItem.factory(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/skore/item/cross_validation_item.py", line 421, in factory
plot = plot_cross_validation_aggregation(cv_results_items_versions)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/skore/item/cross_validation_item.py", line 167, in plot_cross_validation_aggregation
df = df.apply(pandas.Series.explode)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/frame.py", line 10374, in apply
return op.apply().__finalize__(self, method="apply")
^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/apply.py", line 916, in apply
return self.apply_standard()
^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/apply.py", line 1068, in apply_standard
return self.wrap_results(results, res_index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/apply.py", line 1107, in wrap_results
return self.wrap_results_for_axis(results, res_index)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/apply.py", line 1219, in wrap_results_for_axis
result = self.obj._constructor(data=results)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/frame.py", line 778, in __init__
mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 503, in dict_to_mgr
return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 119, in arrays_to_mgr
arrays, refs = _homogenize(arrays, index, dtype)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 611, in _homogenize
val = val.reindex(index, copy=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/series.py", line 5153, in reindex
return super().reindex(
^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/generic.py", line 5610, in reindex
return self._reindex_axes(
^^^^^^^^^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/generic.py", line 5633, in _reindex_axes
new_index, indexer = ax.reindex(
^^^^^^^^^^^
File "/home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/lib/python3.12/site-packages/pandas/core/indexes/base.py", line 4429, in reindex
raise ValueError("cannot reindex on an axis with duplicate labels")
ValueError: cannot reindex on an axis with duplicate labels
```
The docs suggest that a list/tuple can be an input, but running the code suggests otherwise.
### Steps/Code to Reproduce
You can also mock it easily.
```python
from skore import cross_validate
import numpy as np
cross_validate(HistGradientBoostingRegressor(), np.array([1,2,3,4,5]).reshape(-1, 1), [1,2,3,4,5], cv=3, project=my_project, scoring=("r2", ))
```
### Expected Behavior
No error.
### Actual Behavior
Error.
### Environment
```shell
System:
python: 3.12.1 (main, Sep 30 2024, 17:05:21) [GCC 9.4.0]
executable: /home/codespace/.cache/uv/archive-v0/YOp_16Z6MvqhVvqkrfuya/bin/python
machine: Linux-6.5.0-1025-azure-x86_64-with-glibc2.31
Python dependencies:
skore: 0.4.1
pip: None
setuptools: None
diskcache: 5.6.3
fastapi: 0.115.6
plotly<6,>=5: None
pyarrow: 18.1.0
rich: 13.9.4
skops: 0.11.0
uvicorn: 0.32.1
```
| probabl-ai/skore | diff --git a/skore/tests/integration/sklearn/test_cross_validate.py b/skore/tests/integration/sklearn/test_cross_validate.py
index 2f5e1d3..bfc11f0 100644
--- a/skore/tests/integration/sklearn/test_cross_validate.py
+++ b/skore/tests/integration/sklearn/test_cross_validate.py
@@ -8,7 +8,10 @@ from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.svm import SVC
from skore import CrossValidationReporter
-from skore.item.cross_validation_aggregation_item import CrossValidationAggregationItem
+from skore.item.cross_validation_aggregation_item import (
+ CrossValidationAggregationItem,
+ plot_cross_validation_aggregation,
+)
from skore.item.cross_validation_item import CrossValidationItem
@@ -167,6 +170,21 @@ def test_aggregated_cross_validation(rf, in_memory_project):
CrossValidationAggregationItem.factory(project.get_item_versions("cv"))
+def test_plot_aggregated_cross_validation(rf, in_memory_project):
+ """Cross-validation runs with different metrics can be aggregated."""
+ project = in_memory_project
+ args = rf
+ reporter1 = CrossValidationReporter(*args)
+ reporter2 = CrossValidationReporter(
+ *args, scoring=["neg_mean_absolute_error", "r2", "explained_variance"]
+ )
+
+ project.put("cv", reporter1)
+ project.put("cv", reporter2)
+
+ plot_cross_validation_aggregation(project.get_item_versions("cv"))
+
+
def prepare_cv():
from sklearn import datasets, linear_model
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e './skore[test,sphinx]'",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pre-commit"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | accessible-pygments==0.0.5
alabaster==0.7.16
altair==5.5.0
annotated-types==0.7.0
anyio==4.9.0
asttokens==3.0.0
attrs==25.3.0
babel==2.17.0
beautifulsoup4==4.13.3
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
decorator==5.2.1
diskcache==5.6.3
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
executing==2.2.0
fastapi==0.115.12
filelock==3.18.0
fonttools==4.56.0
fsspec==2025.3.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
huggingface-hub==0.30.0
identify==2.6.9
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
ipython==8.18.1
jedi==0.19.2
Jinja2==3.1.6
joblib==1.4.2
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kaleido==0.2.1
kiwisolver==1.4.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
matplotlib-inline==0.1.7
mdurl==0.1.2
narwhals==1.32.0
nodeenv==1.9.1
numpy==2.0.2
numpydoc==1.8.0
packaging==24.2
pandas==2.2.3
parso==0.8.4
pexpect==4.9.0
pillow==11.1.0
platformdirs==4.3.7
plotly==5.24.1
pluggy==1.5.0
polars==1.26.0
pre_commit==4.2.0
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
pyarrow==19.0.1
pydantic==2.11.1
pydantic_core==2.33.0
pydata-sphinx-theme==0.16.1
Pygments==2.19.1
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-order==1.3.0
pytest-randomly==3.16.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
requests==2.32.3
rich==14.0.0
rpds-py==0.24.0
ruff==0.11.2
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
skops==0.11.0
-e git+https://github.com/probabl-ai/skore.git@115c7a2d832e27ef284a592e92fdc72c0abebe17#egg=skore&subdirectory=skore
skrub==0.5.1
sniffio==1.3.1
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-copybutton==0.5.2
sphinx-gallery==0.19.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.6.3
starlette==0.46.1
tabulate==0.9.0
tenacity==9.0.0
threadpoolctl==3.6.0
tomli==2.2.1
tqdm==4.67.1
traitlets==5.14.3
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
uvicorn==0.34.0
virtualenv==20.29.3
wcwidth==0.2.13
zipp==3.21.0
| name: skore
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- accessible-pygments==0.0.5
- alabaster==0.7.16
- altair==5.5.0
- annotated-types==0.7.0
- anyio==4.9.0
- asttokens==3.0.0
- attrs==25.3.0
- babel==2.17.0
- beautifulsoup4==4.13.3
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- decorator==5.2.1
- diskcache==5.6.3
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- executing==2.2.0
- fastapi==0.115.12
- filelock==3.18.0
- fonttools==4.56.0
- fsspec==2025.3.1
- h11==0.14.0
- httpcore==1.0.7
- httpx==0.28.1
- huggingface-hub==0.30.0
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- ipython==8.18.1
- jedi==0.19.2
- jinja2==3.1.6
- joblib==1.4.2
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- kaleido==0.2.1
- kiwisolver==1.4.7
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- matplotlib-inline==0.1.7
- mdurl==0.1.2
- narwhals==1.32.0
- nodeenv==1.9.1
- numpy==2.0.2
- numpydoc==1.8.0
- packaging==24.2
- pandas==2.2.3
- parso==0.8.4
- pexpect==4.9.0
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==5.24.1
- pluggy==1.5.0
- polars==1.26.0
- pre-commit==4.2.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pyarrow==19.0.1
- pydantic==2.11.1
- pydantic-core==2.33.0
- pydata-sphinx-theme==0.16.1
- pygments==2.19.1
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-order==1.3.0
- pytest-randomly==3.16.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- requests==2.32.3
- rich==14.0.0
- rpds-py==0.24.0
- ruff==0.11.2
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- skops==0.11.0
- skore==0.0.0+unknown
- skrub==0.5.1
- sniffio==1.3.1
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-gallery==0.19.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.6.3
- starlette==0.46.1
- tabulate==0.9.0
- tenacity==9.0.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tqdm==4.67.1
- traitlets==5.14.3
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- uvicorn==0.34.0
- virtualenv==20.29.3
- wcwidth==0.2.13
- zipp==3.21.0
prefix: /opt/conda/envs/skore
| [
"skore/tests/integration/sklearn/test_cross_validate.py::test_plot_aggregated_cross_validation"
] | [] | [
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[multiclass_classification_sub_estimator]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[multiclass_classification]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[binary_classification]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[regression]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[clustering]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestMLTask::test_cross_validate[multiclass_classification_no_predict_proba]",
"skore/tests/integration/sklearn/test_cross_validate.py::test_put_cross_validation_reporter",
"skore/tests/integration/sklearn/test_cross_validate.py::test_aggregated_cross_validation",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputDataType::test_data_type[data_is_pandas]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputDataType::test_data_type[data_is_list]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[scoring3]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[true_positive]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[None]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[accuracy]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[scoring2]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[confusion_matrix]",
"skore/tests/integration/sklearn/test_cross_validate.py::TestInputScorers::test_scorer[scoring4]"
] | [] | MIT License | 20,462 | 377 | [
"skore/src/skore/item/cross_validation_aggregation_item.py"
] |
tobymao__sqlglot-4505 | 8f8e84ae81d60bea224e35b9ca88b0bb4a59512b | 2024-12-11 19:01:06 | 0d61fa0e28650a79d608e140bc725c46d6de5706 | diff --git a/sqlglot/dialects/snowflake.py b/sqlglot/dialects/snowflake.py
index 92a1671a..37842fd1 100644
--- a/sqlglot/dialects/snowflake.py
+++ b/sqlglot/dialects/snowflake.py
@@ -1097,7 +1097,11 @@ class Snowflake(Dialect):
else:
unnest_alias = exp.TableAlias(this="_u", columns=columns)
- explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
+ table_input = self.sql(expression.expressions[0])
+ if not table_input.startswith("INPUT =>"):
+ table_input = f"INPUT => {table_input}"
+
+ explode = f"TABLE(FLATTEN({table_input}))"
alias = self.sql(unnest_alias)
alias = f" AS {alias}" if alias else ""
return f"{explode}{alias}"
| Snowflake parsing/output error with Lateral Flatten
Snowflake SQL statement becomes invalid once parsed and output by SQLGlot. An extra `INPUT =>` is added to the query which then fails with `Syntax error: unexpected '=>'. ` The extra `INPUT =>` is visible in the last `CROSS JOIN` statement.
**Fully reproducible code snippet**
```python
from sqlglot import parse_one, exp, parse
sql = """
SELECT
uc.user_id
, uc.start_ts AS ts
, CASE
WHEN uc.start_ts::DATE >= '2023-01-01' AND uc.country_code IN ('US')
AND uc.user_id NOT IN (SELECT DISTINCT _id FROM users, LATERAL FLATTEN(INPUT => PARSE_JSON(flags)) datasource WHERE datasource.value:name = 'something')
THEN 'Sample1'
ELSE
'Sample2'
END AS entity
FROM user_countries AS uc
LEFT JOIN (SELECT user_id, MAX(IFF(service_entity IS NULL,1,0)) AS le_null FROM accepted_user_agreements GROUP BY 1) AS aua
ON uc.user_id = aua.user_id
"""
ast = parse_one(sql, dialect="snowflake")
print(ast.sql(dialect="snowflake", pretty=True))
```
Output:
```
SELECT
uc.user_id,
uc.start_ts AS ts,
CASE
WHEN CAST(uc.start_ts AS DATE) >= '2023-01-01'
AND uc.country_code IN ('US')
AND uc.user_id <> ALL (
SELECT DISTINCT
_id
FROM users, LATERAL IFF(_u.pos = _u_2.pos_2, _u_2.entity, NULL) AS datasource(SEQ, KEY, PATH, INDEX, VALUE, THIS)
WHERE
GET_PATH(datasource.value, 'name') = 'something'
)
THEN 'Sample1'
ELSE 'Sample2'
END AS entity
FROM user_countries AS uc
LEFT JOIN (
SELECT
user_id,
MAX(IFF(service_entity IS NULL, 1, 0)) AS le_null
FROM accepted_user_agreements
GROUP BY
1
) AS aua
ON uc.user_id = aua.user_id
CROSS JOIN TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (
GREATEST(ARRAY_SIZE(INPUT => PARSE_JSON(flags))) - 1
) + 1))) AS _u(seq, key, path, index, pos, this)
CROSS JOIN TABLE(FLATTEN(INPUT => INPUT => PARSE_JSON(flags))) AS _u_2(seq, key, path, pos_2, entity, this)
WHERE
_u.pos = _u_2.pos_2
OR (
_u.pos > (
ARRAY_SIZE(INPUT => PARSE_JSON(flags)) - 1
)
AND _u_2.pos_2 = (
ARRAY_SIZE(INPUT => PARSE_JSON(flags)) - 1
)
)
```
Snowflake error:

| tobymao/sqlglot | diff --git a/tests/dialects/test_snowflake.py b/tests/dialects/test_snowflake.py
index 5164b821..1d55f35d 100644
--- a/tests/dialects/test_snowflake.py
+++ b/tests/dialects/test_snowflake.py
@@ -21,27 +21,6 @@ class TestSnowflake(Validator):
expr.selects[0].assert_is(exp.AggFunc)
self.assertEqual(expr.sql(dialect="snowflake"), "SELECT APPROX_TOP_K(C4, 3, 5) FROM t")
- self.assertEqual(
- exp.select(exp.Explode(this=exp.column("x")).as_("y", quoted=True)).sql(
- "snowflake", pretty=True
- ),
- """SELECT
- IFF(_u.pos = _u_2.pos_2, _u_2."y", NULL) AS "y"
-FROM TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (
- GREATEST(ARRAY_SIZE(x)) - 1
-) + 1))) AS _u(seq, key, path, index, pos, this)
-CROSS JOIN TABLE(FLATTEN(INPUT => x)) AS _u_2(seq, key, path, pos_2, "y", this)
-WHERE
- _u.pos = _u_2.pos_2
- OR (
- _u.pos > (
- ARRAY_SIZE(x) - 1
- ) AND _u_2.pos_2 = (
- ARRAY_SIZE(x) - 1
- )
- )""",
- )
-
self.validate_identity("exclude := [foo]")
self.validate_identity("SELECT CAST([1, 2, 3] AS VECTOR(FLOAT, 3))")
self.validate_identity("SELECT CONNECT_BY_ROOT test AS test_column_alias")
@@ -1603,6 +1582,27 @@ WHERE
)
def test_flatten(self):
+ self.assertEqual(
+ exp.select(exp.Explode(this=exp.column("x")).as_("y", quoted=True)).sql(
+ "snowflake", pretty=True
+ ),
+ """SELECT
+ IFF(_u.pos = _u_2.pos_2, _u_2."y", NULL) AS "y"
+FROM TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (
+ GREATEST(ARRAY_SIZE(x)) - 1
+) + 1))) AS _u(seq, key, path, index, pos, this)
+CROSS JOIN TABLE(FLATTEN(INPUT => x)) AS _u_2(seq, key, path, pos_2, "y", this)
+WHERE
+ _u.pos = _u_2.pos_2
+ OR (
+ _u.pos > (
+ ARRAY_SIZE(x) - 1
+ ) AND _u_2.pos_2 = (
+ ARRAY_SIZE(x) - 1
+ )
+ )""",
+ )
+
self.validate_all(
"""
select
@@ -1627,6 +1627,75 @@ FROM cs.telescope.dag_report, TABLE(FLATTEN(input => SPLIT(operators, ','))) AS
},
pretty=True,
)
+ self.validate_all(
+ """
+ SELECT
+ uc.user_id,
+ uc.start_ts AS ts,
+ CASE
+ WHEN uc.start_ts::DATE >= '2023-01-01' AND uc.country_code IN ('US') AND uc.user_id NOT IN (
+ SELECT DISTINCT
+ _id
+ FROM
+ users,
+ LATERAL FLATTEN(INPUT => PARSE_JSON(flags)) datasource
+ WHERE datasource.value:name = 'something'
+ )
+ THEN 'Sample1'
+ ELSE 'Sample2'
+ END AS entity
+ FROM user_countries AS uc
+ LEFT JOIN (
+ SELECT user_id, MAX(IFF(service_entity IS NULL,1,0)) AS le_null
+ FROM accepted_user_agreements
+ GROUP BY 1
+ ) AS aua
+ ON uc.user_id = aua.user_id
+ """,
+ write={
+ "snowflake": """SELECT
+ uc.user_id,
+ uc.start_ts AS ts,
+ CASE
+ WHEN CAST(uc.start_ts AS DATE) >= '2023-01-01'
+ AND uc.country_code IN ('US')
+ AND uc.user_id <> ALL (
+ SELECT DISTINCT
+ _id
+ FROM users, LATERAL IFF(_u.pos = _u_2.pos_2, _u_2.entity, NULL) AS datasource(SEQ, KEY, PATH, INDEX, VALUE, THIS)
+ WHERE
+ GET_PATH(datasource.value, 'name') = 'something'
+ )
+ THEN 'Sample1'
+ ELSE 'Sample2'
+ END AS entity
+FROM user_countries AS uc
+LEFT JOIN (
+ SELECT
+ user_id,
+ MAX(IFF(service_entity IS NULL, 1, 0)) AS le_null
+ FROM accepted_user_agreements
+ GROUP BY
+ 1
+) AS aua
+ ON uc.user_id = aua.user_id
+CROSS JOIN TABLE(FLATTEN(INPUT => ARRAY_GENERATE_RANGE(0, (
+ GREATEST(ARRAY_SIZE(INPUT => PARSE_JSON(flags))) - 1
+) + 1))) AS _u(seq, key, path, index, pos, this)
+CROSS JOIN TABLE(FLATTEN(INPUT => PARSE_JSON(flags))) AS _u_2(seq, key, path, pos_2, entity, this)
+WHERE
+ _u.pos = _u_2.pos_2
+ OR (
+ _u.pos > (
+ ARRAY_SIZE(INPUT => PARSE_JSON(flags)) - 1
+ )
+ AND _u_2.pos_2 = (
+ ARRAY_SIZE(INPUT => PARSE_JSON(flags)) - 1
+ )
+ )""",
+ },
+ pretty=True,
+ )
# All examples from https://docs.snowflake.com/en/sql-reference/functions/flatten.html#syntax
self.validate_all(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 26.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
coverage==7.8.0
distlib==0.3.9
duckdb==1.2.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
maturin==1.8.3
mypy==1.15.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pandas-stubs==2.2.2.240807
pdoc==15.0.1
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
Pygments==2.19.1
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
ruff==0.7.2
six==1.17.0
-e git+https://github.com/tobymao/sqlglot.git@8f8e84ae81d60bea224e35b9ca88b0bb4a59512b#egg=sqlglot
tomli==2.2.1
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
typing_extensions==4.13.0
tzdata==2025.2
virtualenv==20.29.3
| name: sqlglot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- coverage==7.8.0
- distlib==0.3.9
- duckdb==1.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- maturin==1.8.3
- mypy==1.15.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pandas-stubs==2.2.2.240807
- pdoc==15.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- ruff==0.7.2
- six==1.17.0
- tomli==2.2.1
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- tzdata==2025.2
- virtualenv==20.29.3
prefix: /opt/conda/envs/sqlglot
| [
"tests/dialects/test_snowflake.py::TestSnowflake::test_flatten"
] | [] | [
"tests/dialects/test_snowflake.py::TestSnowflake::test_alter_set_unset",
"tests/dialects/test_snowflake.py::TestSnowflake::test_copy",
"tests/dialects/test_snowflake.py::TestSnowflake::test_ddl",
"tests/dialects/test_snowflake.py::TestSnowflake::test_describe_table",
"tests/dialects/test_snowflake.py::TestSnowflake::test_from_changes",
"tests/dialects/test_snowflake.py::TestSnowflake::test_grant",
"tests/dialects/test_snowflake.py::TestSnowflake::test_historical_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_match_recognize",
"tests/dialects/test_snowflake.py::TestSnowflake::test_minus",
"tests/dialects/test_snowflake.py::TestSnowflake::test_null_treatment",
"tests/dialects/test_snowflake.py::TestSnowflake::test_parse_like_any",
"tests/dialects/test_snowflake.py::TestSnowflake::test_querying_semi_structured_data",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_replace",
"tests/dialects/test_snowflake.py::TestSnowflake::test_regexp_substr",
"tests/dialects/test_snowflake.py::TestSnowflake::test_sample",
"tests/dialects/test_snowflake.py::TestSnowflake::test_semi_structured_types",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_columns",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_imported_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_objects",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_primary_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_schemas",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_sequences",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_tables",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_unique_keys",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_users",
"tests/dialects/test_snowflake.py::TestSnowflake::test_show_views",
"tests/dialects/test_snowflake.py::TestSnowflake::test_snowflake",
"tests/dialects/test_snowflake.py::TestSnowflake::test_staged_files",
"tests/dialects/test_snowflake.py::TestSnowflake::test_storage_integration",
"tests/dialects/test_snowflake.py::TestSnowflake::test_stored_procedures",
"tests/dialects/test_snowflake.py::TestSnowflake::test_swap",
"tests/dialects/test_snowflake.py::TestSnowflake::test_table_literal",
"tests/dialects/test_snowflake.py::TestSnowflake::test_timestamps",
"tests/dialects/test_snowflake.py::TestSnowflake::test_try_cast",
"tests/dialects/test_snowflake.py::TestSnowflake::test_user_defined_functions",
"tests/dialects/test_snowflake.py::TestSnowflake::test_values",
"tests/dialects/test_snowflake.py::TestSnowflake::test_window_function_arg"
] | [] | MIT License | 20,468 | 230 | [
"sqlglot/dialects/snowflake.py"
] |
|
nilearn__nilearn-4939 | d57c0d86e991e8de5fed4ab1ff9fc8350ddeeb90 | 2024-12-12 09:02:02 | d979bacad0dfd0600e35a23f9f08d3df7907e489 | github-actions[bot]: 👋 @Remi-Gau Thanks for creating a PR!
Until this PR is ready for review, you can include the [WIP] tag in its title, or leave it as a github draft.
Please make sure it is compliant with our [contributing guidelines](https://nilearn.github.io/stable/development.html#contribution-guidelines). In particular, be sure it checks the boxes listed below.
- [ ] PR has an interpretable title.
- [ ] PR links to Github issue with mention `Closes #XXXX` (see our documentation on [PR structure](https://nilearn.github.io/stable/development.html#pr-structure))
- [ ] Code is PEP8-compliant (see our documentation on [coding style](https://nilearn.github.io/stable/development.html#coding-style))
- [ ] Changelog or what's new entry in `doc/changes/latest.rst` (see our documentation on [PR structure](https://nilearn.github.io/stable/development.html#pr-structure))
For new features:
- [ ] There is at least one unit test per new function / class (see our documentation on [testing](https://nilearn.github.io/stable/development.html#tests))
- [ ] The new feature is demoed in at least one relevant example.
For bug fixes:
- [ ] There is at least one test that would fail under the original bug conditions.
We will review it as quick as possible, feel free to ping us with questions if needed.
man-shu: Hmmm... very weird indeed! I think we should first start with writing separate tests for each: fit, transform, and inverse_transform
Remi-Gau: > Hmmm... very weird indeed! I think we should first start with writing separate tests for each: fit, transform, and inverse_transform
agreed
do you want to take care of it ore should I?
man-shu: > do you want to take care of it ore should I?
On it!
man-shu: Ok, so this is what I understood so far:
This method uses sklearn's [KMeans](https://scikit-learn.org/stable/modules/clustering.html#k-means) under the hood (actually, it uses MiniBatchKMeans but they are the essentially the same for us and the difference between them is beside the point).
Sklearn's KMeans is trying to find the clusters in **samples**, and we want to find clusters in **features**, so unless we transpose our array after masking, it will give us weird results.
That is why the `.transform` expects us to provide X in the `(n_features, n_samples)` form so that later on, our features would be passed into MiniBatchKMeans as samples (remember all sklearn estimators expect X in `(n_samples, n_features)` form), and it would find the clusters in **our features**. So, the onus is on the user to provide the X in "correct" form.
I think we should make the API consistent and do the transpose under the hood and not expect the user to do so.
Please correct me if I am wrong @emdupre @bthirion .
man-shu: @Remi-Gau pinging you for a review on this one
man-shu: Oh so turns out almost all parcellation methods have this transpose issue because they are based on sklearn methods...
Remi-Gau: > Oh so turns out almost all parcellation methods have this transpose issue because they are based on sklearn methods...
irony mode: on
great!!!!
irony mode: off
emdupre: > Oh so turns out almost all parcellation methods have this transpose issue because they are based on sklearn methods...
Sorry what is the "issue" in this case, just to make sure I understand ? I thought the original issue was to streamline the user experience across parcellation methods, so I'm not sure how all parcellation methods could have that issue !
codecov[bot]: ## [Codecov](https://app.codecov.io/gh/nilearn/nilearn/pull/4939?dropdown=coverage&src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) Report
All modified and coverable lines are covered by tests :white_check_mark:
> Project coverage is 93.20%. Comparing base [(`d642bb8`)](https://app.codecov.io/gh/nilearn/nilearn/commit/d642bb8a63e19620729fbb52d72dffb933fa707a?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) to head [(`cfa759f`)](https://app.codecov.io/gh/nilearn/nilearn/commit/cfa759f82b16749e6200527fe47dbf93f8ff2d23?dropdown=coverage&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
<details><summary>Additional details and impacted files</summary>
```diff
@@ Coverage Diff @@
## main #4939 +/- ##
==========================================
- Coverage 93.85% 93.20% -0.66%
==========================================
Files 139 139
Lines 17414 17417 +3
Branches 2977 2977
==========================================
- Hits 16344 16233 -111
- Misses 564 650 +86
- Partials 506 534 +28
```
| [Flag](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flags&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | Coverage Δ | |
|---|---|---|
| [macos-latest_3.10_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [macos-latest_3.11_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [macos-latest_3.12_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `93.20% <100.00%> (+<0.01%)` | :arrow_up: |
| [macos-latest_3.13_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `93.20% <100.00%> (+<0.01%)` | :arrow_up: |
| [macos-latest_3.9_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.10_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.11_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.12_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.13_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.13_test_pre](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.9_test_min](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.9_test_plot_min](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [ubuntu-latest_3.9_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.10_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.11_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.12_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.13_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
| [windows-latest_3.9_test_plotting](https://app.codecov.io/gh/nilearn/nilearn/pull/4939/flags?src=pr&el=flag&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | `?` | |
Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn#carryforward-flags-in-the-pull-request-comment) to find out more.
</details>
[:umbrella: View full report in Codecov by Sentry](https://app.codecov.io/gh/nilearn/nilearn/pull/4939?dropdown=coverage&src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
:loudspeaker: Have feedback on the report? [Share it here](https://about.codecov.io/codecov-pr-comment-feedback/?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
man-shu: Ok, this one is ready for a review @Remi-Gau
Remi-Gau: probably worth adding a changelog entry, no?
man-shu: > probably worth adding a changelog entry, no?
Oh yes, sorry, I always forget that | diff --git a/nilearn/regions/hierarchical_kmeans_clustering.py b/nilearn/regions/hierarchical_kmeans_clustering.py
index 3949170f4..457f1b113 100644
--- a/nilearn/regions/hierarchical_kmeans_clustering.py
+++ b/nilearn/regions/hierarchical_kmeans_clustering.py
@@ -259,7 +259,7 @@ class HierarchicalKMeans(ClusterMixin, TransformerMixin, BaseEstimator):
Parameters
----------
- X : ndarray, shape = [n_features, n_samples]
+ X : ndarray, shape = [n_samples, n_features]
Training data.
y : Ignored
@@ -270,6 +270,13 @@ class HierarchicalKMeans(ClusterMixin, TransformerMixin, BaseEstimator):
X = check_array(
X, ensure_min_features=2, ensure_min_samples=2, estimator=self
)
+ # Transpose the data so that we can cluster features (voxels)
+ # and input them as samples to the sklearn's clustering algorithm
+ # This is because sklearn's clustering algorithm does clustering
+ # on samples and not on features
+ X = X.T
+ # n_features for the sklearn's clustering algorithm would be the
+ # number of samples in the input data
n_features = X.shape[1]
if self.n_clusters <= 0:
@@ -310,15 +317,19 @@ class HierarchicalKMeans(ClusterMixin, TransformerMixin, BaseEstimator):
Parameters
----------
- X : ndarray, shape = [n_features, n_samples]
+ X : ndarray, shape = [n_samples, n_features]
Data to transform with the fitted clustering.
Returns
-------
- X_red : ndarray, shape = [n_clusters, n_samples]
+ X_red : ndarray, shape = [n_samples, n_clusters]
Data reduced with agglomerated signal for each cluster
"""
check_is_fitted(self, "labels_")
+
+ # Transpose the data so that we can cluster features (voxels)
+ # and input them as samples to the sklearn's clustering algorithm
+ X = X.T
unique_labels = np.arange(self.n_clusters)
mean_cluster = np.empty(
@@ -332,6 +343,9 @@ class HierarchicalKMeans(ClusterMixin, TransformerMixin, BaseEstimator):
if self.scaling:
X_red = X_red * np.sqrt(self.sizes_[:, np.newaxis])
+ # Transpose the data back to the original shape i.e.
+ # (n_samples, n_clusters)
+ X_red = X_red.T
return X_red
def inverse_transform(self, X_red):
@@ -340,18 +354,19 @@ class HierarchicalKMeans(ClusterMixin, TransformerMixin, BaseEstimator):
Parameters
----------
- X_red : ndarray , shape = [n_clusters, n_samples]
+ X_red : ndarray , shape = [n_samples, n_clusters]
Data reduced with agglomerated signal for each cluster
Returns
-------
- X_inv : ndarray, shape = [n_features, n_samples]
+ X_inv : ndarray, shape = [n_samples, n_features]
Data reduced expanded to the original feature space
"""
check_is_fitted(self, "labels_")
+ X_red = X_red.T
inverse = self.labels_
if self.scaling:
X_red = X_red / np.sqrt(self.sizes_[:, np.newaxis])
X_inv = X_red[inverse, ...]
-
+ X_inv = X_inv.T
return X_inv
diff --git a/nilearn/regions/parcellations.py b/nilearn/regions/parcellations.py
index ddcd494e8..2c0d1c318 100644
--- a/nilearn/regions/parcellations.py
+++ b/nilearn/regions/parcellations.py
@@ -20,6 +20,9 @@ from .rena_clustering import ReNA
def _estimator_fit(data, estimator, method=None):
"""Estimator to fit on the data matrix.
+ Mostly just choosing which methods to transpose the data for because
+ KMeans, AgglomerativeClustering cluster first dimension of data (samples)
+ but we want to cluster features (voxels).
Parameters
----------
@@ -41,24 +44,15 @@ def _estimator_fit(data, estimator, method=None):
labels_ estimated from estimator.
"""
- if method == "rena":
- rena = ReNA(
- mask_img=estimator.mask_img,
- n_clusters=estimator.n_clusters,
- scaling=estimator.scaling,
- n_iter=estimator.n_iter,
- threshold=estimator.threshold,
- memory=estimator.memory,
- memory_level=estimator.memory_level,
- verbose=estimator.verbose,
- )
- rena.fit(data)
- labels_ = rena.labels_
-
+ estimator = clone(estimator)
+ if method in ["rena", "hierarchical_kmeans"]:
+ estimator.fit(data)
+ # transpose data for KMeans, AgglomerativeClustering because
+ # they cluster first dimension of data (samples) but we want to cluster
+ # features (voxels)
else:
- estimator = clone(estimator)
estimator.fit(data.T)
- labels_ = estimator.labels_
+ labels_ = estimator.labels_
return labels_
@@ -391,7 +385,7 @@ class Parcellations(_MultiPCA):
)
# data ou data.T
labels = self._cache(_estimator_fit, func_memory_level=1)(
- components.T, hkmeans
+ components.T, hkmeans, self.method
)
elif self.method == "rena":
| [BUG] `HierarchicalKMeans.fit()` takes transposed X
.... I noticed that `HierarchicalKMeans.fit()` takes transposed X:
https://github.com/nilearn/nilearn/blob/135e5388386c24ca879328040273127f8acb4a6a/nilearn/regions/hierarchical_kmeans_clustering.py#L233
See in `ReNA.fit()` for comparison:
https://github.com/nilearn/nilearn/blob/135e5388386c24ca879328040273127f8acb4a6a/nilearn/regions/rena_clustering.py#L518
Is this a bug or a feature?
_Originally posted by @man-shu in https://github.com/nilearn/nilearn/pull/4577#discussion_r1843840330_
| nilearn/nilearn | diff --git a/doc/changes/latest.rst b/doc/changes/latest.rst
index 79a661edd..3577af3e4 100644
--- a/doc/changes/latest.rst
+++ b/doc/changes/latest.rst
@@ -29,5 +29,7 @@ Enhancements
- :bdg-primary:`Doc` Add a :ref:`page <meaning_difference>` in the user guide to explain GLM terminology across software (Nilearn, SPM, FSL) regarding the meaning of 'levels' (:gh:`4287` by `Thiti Premrudeepreechacharn`_).
+- :bdg-dark:`Code` Update :obj:`~nilearn.regions.HierarchicalKMeans` to take X as array of shape ``[n_samples, n_features]`` (:gh:`4939` by `Himanshu Aggarwal`_ and `Rémi Gau`_).
+
Changes
-------
diff --git a/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py b/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
index b527506e0..0e24b924e 100644
--- a/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
+++ b/nilearn/regions/tests/test_hierarchical_kmeans_clustering.py
@@ -6,40 +6,34 @@ from sklearn import __version__ as sklearn_version
from nilearn._utils import compare_version
from nilearn._utils.class_inspect import check_estimator
from nilearn._utils.data_gen import generate_fake_fmri
-from nilearn.input_data import NiftiMasker
-from nilearn.maskers import SurfaceMasker
+from nilearn.maskers import NiftiMasker, SurfaceMasker
from nilearn.regions.hierarchical_kmeans_clustering import (
HierarchicalKMeans,
_adjust_small_clusters,
hierarchical_k_means,
)
+from nilearn.surface import SurfaceImage
+from nilearn.surface.tests.test_surface import flat_mesh
extra_valid_checks = [
"check_clusterer_compute_labels_predict",
- "check_clustering",
"check_complex_data",
- "check_dict_unchanged",
"check_do_not_raise_errors_in_init_or_set_params",
"check_dont_overwrite_parameters",
"check_dtype_object",
"check_estimator_sparse_array",
"check_estimator_sparse_matrix",
- "check_estimators_dtypes",
"check_estimators_empty_data_messages",
- "check_estimators_fit_returns_self",
- "check_estimators_pickle",
"check_estimators_unfitted",
"check_f_contiguous_array_estimator",
"check_fit2d_1sample",
"check_fit2d_1feature",
- "check_fit_check_is_fitted",
"check_fit1d",
- "check_fit_score_takes_y",
"check_no_attributes_set_in_init",
- "check_pipeline_consistency",
- "check_readonly_memmap_input",
"check_transformers_unfitted",
"check_transformer_n_iter",
+ "check_methods_subset_invariance",
+ "check_methods_sample_order_invariance",
]
@@ -48,10 +42,12 @@ if compare_version(sklearn_version, ">", "1.5.2"):
# TODO remove when dropping support for sklearn_version < 1.5.0
if compare_version(sklearn_version, "<", "1.5.0"):
- extra_valid_checks.append("check_estimator_sparse_data")
-
-if compare_version(sklearn_version, ">=", "1.6"):
- extra_valid_checks.append("check_positive_only_tag_during_fit")
+ extra_valid_checks.extend(
+ [
+ "check_estimator_sparse_data",
+ "check_dict_unchanged",
+ ]
+ )
@pytest.mark.parametrize(
@@ -111,37 +107,76 @@ def test_hierarchical_k_means():
assert_array_almost_equal(test_labels, truth_labels)
-def test_hierarchical_k_means_clustering():
- data_img, mask_img = generate_fake_fmri(shape=(10, 11, 12), length=15)
+def test_hierarchical_k_means_clustering_transform():
+ n_samples = 15
+ n_clusters = 8
+ data_img, mask_img = generate_fake_fmri(
+ shape=(10, 11, 12), length=n_samples
+ )
+ masker = NiftiMasker(mask_img=mask_img).fit()
+ X = masker.transform(data_img)
+ hkmeans = HierarchicalKMeans(n_clusters=n_clusters).fit(X)
+ X_red = hkmeans.transform(X)
+
+ assert X_red.shape == (n_samples, n_clusters)
+
+
+def test_hierarchical_k_means_clustering_inverse_transform():
+ n_samples = 15
+ n_clusters = 8
+ data_img, mask_img = generate_fake_fmri(
+ shape=(10, 11, 12), length=n_samples
+ )
+ masker = NiftiMasker(mask_img=mask_img).fit()
+ X = masker.transform(data_img)
+ hkmeans = HierarchicalKMeans(n_clusters=n_clusters).fit(X)
+ X_red = hkmeans.transform(X)
+ X_inv = hkmeans.inverse_transform(X_red)
+
+ assert X_inv.shape == X.shape
+
+
[email protected]("n_clusters", [-2, 0])
+def test_hierarchical_k_means_clustering_error_n_clusters(n_clusters):
+ n_samples = 15
+ data_img, mask_img = generate_fake_fmri(
+ shape=(10, 11, 12), length=n_samples
+ )
masker = NiftiMasker(mask_img=mask_img).fit()
- X = masker.transform(data_img).T
+ X = masker.transform(data_img)
with pytest.raises(
ValueError,
match="n_clusters should be an integer greater than 0."
- " -2 was provided.",
+ f" {n_clusters} was provided.",
):
- HierarchicalKMeans(n_clusters=-2).fit(X)
+ HierarchicalKMeans(n_clusters=n_clusters).fit(X)
+
+
+def test_hierarchical_k_means_clustering_scaling():
+ n_samples = 15
+ n_clusters = 8
+ data_img, mask_img = generate_fake_fmri(
+ shape=(10, 11, 12), length=n_samples
+ )
+ masker = NiftiMasker(mask_img=mask_img).fit()
+ X = masker.transform(data_img)
- hkmeans = HierarchicalKMeans(n_clusters=8)
+ hkmeans = HierarchicalKMeans(n_clusters=n_clusters)
X_red = hkmeans.fit_transform(X)
X_compress = hkmeans.inverse_transform(X_red)
- assert_array_almost_equal(X.shape, X_compress.shape)
-
- hkmeans_scaled = HierarchicalKMeans(n_clusters=8, scaling=True)
+ hkmeans_scaled = HierarchicalKMeans(n_clusters=n_clusters, scaling=True)
X_red_scaled = hkmeans_scaled.fit_transform(X)
sizes = hkmeans_scaled.sizes_
X_compress_scaled = hkmeans_scaled.inverse_transform(X_red_scaled)
assert_array_almost_equal(
- np.asarray([np.sqrt(s) * a for s, a in zip(sizes, X_red)]),
+ np.asarray([np.sqrt(s) * a for s, a in zip(sizes, X_red.T)]).T,
X_red_scaled,
)
assert_array_almost_equal(X_compress, X_compress_scaled)
- del X_red, X_compress, X_red_scaled, X_compress_scaled
-
@pytest.mark.parametrize("surf_mask_dim", [1, 2])
@pytest.mark.parametrize("n_clusters", [2, 4, 5])
@@ -149,20 +184,51 @@ def test_hierarchical_k_means_clustering_surface(
surf_img_2d, surf_mask_dim, surf_mask_1d, surf_mask_2d, n_clusters
):
"""Test hierarchical k-means clustering on surface."""
+ n_samples = 100
surf_mask = surf_mask_1d if surf_mask_dim == 1 else surf_mask_2d()
# create a surface masker
masker = SurfaceMasker(surf_mask).fit()
# mask the surface image with 50 samples
- X = masker.transform(surf_img_2d(50)).T
+ X = masker.transform(surf_img_2d(n_samples))
# instantiate HierarchicalKMeans with n_clusters
- clustering = HierarchicalKMeans(n_clusters=n_clusters)
+ hkmeans = HierarchicalKMeans(n_clusters=n_clusters)
# fit and transform the data
- X_transformed = clustering.fit_transform(X)
+ X_transformed = hkmeans.fit_transform(X)
# inverse transform the transformed data
- X_inverse = clustering.inverse_transform(X_transformed)
+ X_inverse = hkmeans.inverse_transform(X_transformed)
# make sure the n_features in transformed data were reduced to n_clusters
- assert X_transformed.shape[0] == n_clusters
+ assert X_transformed.shape == (n_samples, n_clusters)
+ assert hkmeans.n_clusters == n_clusters
# make sure the inverse transformed data has the same shape as the original
assert X_inverse.shape == X.shape
+
+
[email protected]("img_type", ["surface", "volume"])
+def test_hierarchical_k_means_n_clusters_warning(img_type, rng):
+ n_samples = 15
+ if img_type == "surface":
+ mesh = {
+ "left": flat_mesh(10, 8),
+ "right": flat_mesh(9, 7),
+ }
+ data = {
+ "left": rng.standard_normal(
+ size=(mesh["left"].coordinates.shape[0], n_samples)
+ ),
+ "right": rng.standard_normal(
+ size=(mesh["right"].coordinates.shape[0], n_samples)
+ ),
+ }
+ img = SurfaceImage(mesh=mesh, data=data)
+ X = SurfaceMasker().fit_transform(img)
+ else:
+ img, _ = generate_fake_fmri(shape=(10, 11, 12), length=n_samples)
+ X = NiftiMasker().fit_transform(img)
+
+ with pytest.warns(
+ match="n_clusters should be at most the number of features.",
+ ):
+ # very high number of clusters
+ HierarchicalKMeans(n_clusters=1000).fit_transform(X)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-randomly",
"pytest-reporter-html1",
"pytest-xdist"
],
"pre_install": [
"apt-get update",
"apt-get install -y dvipng texlive-latex-base texlive-latex-extra"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
ansi2html==1.9.2
babel==2.17.0
beautifulsoup4==4.13.3
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
distlib==0.3.9
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
filelock==3.18.0
fonttools==4.56.0
furo==2024.8.6
htmlmin2==0.1.13
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
joblib==1.4.2
kaleido==0.2.1
kiwisolver==1.4.7
latexcodec==3.0.0
lxml==5.3.1
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib==3.9.4
mdit-py-plugins==0.4.2
mdurl==0.1.2
memory-profiler==0.61.0
myst-parser==3.0.1
narwhals==1.32.0
nibabel==5.3.2
-e git+https://github.com/nilearn/nilearn.git@d57c0d86e991e8de5fed4ab1ff9fc8350ddeeb90#egg=nilearn
numpy==2.0.2
numpydoc==1.8.0
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pillow==11.1.0
platformdirs==4.3.7
plotly==6.0.1
pluggy @ file:///croot/pluggy_1733169602837/work
psutil==7.0.0
pybtex==0.24.0
pybtex-docutils==1.0.3
Pygments==2.19.1
pyparsing==3.2.3
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-randomly==3.16.0
pytest-reporter==0.5.3
pytest-reporter-html1==0.9.2
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.6
Sphinx==7.4.7
sphinx-basic-ng==1.0.0b2
sphinx-copybutton==0.5.2
sphinx-gallery==0.19.0
sphinx_design==0.6.1
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-bibtex==2.6.3
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
sphinxext-opengraph==0.9.1
tabulate==0.9.0
threadpoolctl==3.6.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: nilearn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- ansi2html==1.9.2
- babel==2.17.0
- beautifulsoup4==4.13.3
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- distlib==0.3.9
- docutils==0.21.2
- execnet==2.1.1
- filelock==3.18.0
- fonttools==4.56.0
- furo==2024.8.6
- htmlmin2==0.1.13
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- jinja2==3.1.6
- joblib==1.4.2
- kaleido==0.2.1
- kiwisolver==1.4.7
- latexcodec==3.0.0
- lxml==5.3.1
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib==3.9.4
- mdit-py-plugins==0.4.2
- mdurl==0.1.2
- memory-profiler==0.61.0
- myst-parser==3.0.1
- narwhals==1.32.0
- nibabel==5.3.2
- nilearn==0.11.1.dev84+gd57c0d86e
- numpy==2.0.2
- numpydoc==1.8.0
- pandas==2.2.3
- pillow==11.1.0
- platformdirs==4.3.7
- plotly==6.0.1
- psutil==7.0.0
- pybtex==0.24.0
- pybtex-docutils==1.0.3
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-api==1.9.0
- pytest-cov==6.0.0
- pytest-randomly==3.16.0
- pytest-reporter==0.5.3
- pytest-reporter-html1==0.9.2
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.6
- sphinx==7.4.7
- sphinx-basic-ng==1.0.0b2
- sphinx-copybutton==0.5.2
- sphinx-design==0.6.1
- sphinx-gallery==0.19.0
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-bibtex==2.6.3
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sphinxext-opengraph==0.9.1
- tabulate==0.9.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/nilearn
| [
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[4-1]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator18-check18-check_methods_sample_order_invariance]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_n_clusters_warning[volume]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[2-2]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[2-1]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[5-1]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_n_clusters_warning[surface]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator19-check19-check_methods_subset_invariance]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[4-2]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_transform",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_surface[5-2]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_scaling"
] | [
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator_invalid[estimator9-check9-check_estimator_sparse_tag]"
] | [
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator0-check0-check_estimator_cloneable]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator20-check20-check_get_params_invariance]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator14-check14-check_transformer_n_iter]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator7-check7-check_estimators_unfitted]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_error_n_clusters[0]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_inverse_transform",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator9-check9-check_mixin_order]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator2-check2-check_estimator_tags_renamed]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator21-check21-check_set_params]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator3-check3-check_valid_tag_types]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator15-check15-check_clusterer_compute_labels_predict]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator16-check16-check_estimators_partial_fit_n_features]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator10-check10-check_complex_data]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list2-10]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator8-check8-check_do_not_raise_errors_in_init_or_set_params]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list3-11]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator17-check17-check_parameters_default_constructible]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator11-check11-check_dtype_object]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_hierarchical_k_means_clustering_error_n_clusters[-2]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator6-check6-check_dont_overwrite_parameters]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator1-check1-check_estimator_cloneable]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list1-9]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator5-check5-check_no_attributes_set_in_init]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator4-check4-check_estimator_repr]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator12-check12-check_estimators_empty_data_messages]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_check_estimator[estimator13-check13-check_transformers_unfitted]",
"nilearn/regions/tests/test_hierarchical_kmeans_clustering.py::test_adjust_small_clusters[test_list0-5]"
] | [] | New BSD License | 20,471 | 1,339 | [
"nilearn/regions/hierarchical_kmeans_clustering.py",
"nilearn/regions/parcellations.py"
] |
CrossGL__crosstl-213 | c4325a413d735e2bd8a5e46ccff907c0fec13260 | 2024-12-14 06:30:56 | 36bed5871a8d102f73cfebf82c8d8495aaa89e87 | diff --git a/crosstl/src/backend/DirectX/DirectxLexer.py b/crosstl/src/backend/DirectX/DirectxLexer.py
index b8a23b2..a2c6084 100644
--- a/crosstl/src/backend/DirectX/DirectxLexer.py
+++ b/crosstl/src/backend/DirectX/DirectxLexer.py
@@ -35,6 +35,7 @@ TOKENS = [
("COLON", r":"),
("QUESTION", r"\?"),
("SHIFT_LEFT", r"<<"),
+ ("SHIFT_RIGHT", r">>"),
("LESS_EQUAL", r"<="),
("GREATER_EQUAL", r">="),
("LESS_THAN", r"<"),
diff --git a/crosstl/src/backend/DirectX/DirectxParser.py b/crosstl/src/backend/DirectX/DirectxParser.py
index 201a410..c3cd409 100644
--- a/crosstl/src/backend/DirectX/DirectxParser.py
+++ b/crosstl/src/backend/DirectX/DirectxParser.py
@@ -239,6 +239,7 @@ class HLSLParser:
"ASSIGN_OR",
"ASSIGN_AND",
"SHIFT_LEFT",
+ "SHIFT_RIGHT",
"BITWISE_OR",
"BITWISE_XOR",
]:
@@ -259,6 +260,7 @@ class HLSLParser:
"ASSIGN_OR",
"ASSIGN_AND",
"SHIFT_LEFT",
+ "SHIFT_RIGHT",
"BITWISE_OR",
"BITWISE_XOR",
]:
@@ -282,6 +284,7 @@ class HLSLParser:
"ASSIGN_OR",
"ASSIGN_AND",
"SHIFT_LEFT",
+ "SHIFT_RIGHT",
"BITWISE_OR",
"BITWISE_XOR",
]:
@@ -413,6 +416,7 @@ class HLSLParser:
"ASSIGN_OR",
"ASSIGN_AND",
"SHIFT_LEFT",
+ "SHIFT_RIGHT",
"BITWISE_OR",
"BITWISE_XOR",
]:
@@ -454,6 +458,7 @@ class HLSLParser:
while self.current_token[0] in [
"LESS_THAN",
"SHIFT_LEFT",
+ "SHIFT_RIGHT",
"GREATER_THAN",
"LESS_EQUAL",
"GREATER_EQUAL",
| SHIFT_RIGHT : >> | CrossGL/crosstl | diff --git a/tests/test_backend/test_directx/test_codegen.py b/tests/test_backend/test_directx/test_codegen.py
index 59294a9..07ae81b 100644
--- a/tests/test_backend/test_directx/test_codegen.py
+++ b/tests/test_backend/test_directx/test_codegen.py
@@ -437,7 +437,19 @@ def test_assignment_ops_codegen():
output.redValue << 1; // Shift left by 1
output.redValue &= 0x3;
}
+
+ // Testing SHIFT_RIGHT (>>) operator on some condition
+ if (input.in_position.r == 0.25) {
+ uint redValue = asuint(output.out_color.r);
+ output.redValue ^= 0x1;
+ output.out_color.r = asfloat(redValue);
+ output.redValue |= 0x2;
+ // Applying shift left operation
+ output.redValue >> 1; // Shift left by 1
+ output.redValue &= 0x3;
+ }
+
// Testing BITWISE_XOR (^) operator on some condition
if (input.in_position.r == 0.5) {
uint redValue = asuint(output.out_color.r);
diff --git a/tests/test_backend/test_directx/test_lexer.py b/tests/test_backend/test_directx/test_lexer.py
index 296866f..cb5ea7f 100644
--- a/tests/test_backend/test_directx/test_lexer.py
+++ b/tests/test_backend/test_directx/test_lexer.py
@@ -146,6 +146,20 @@ def test_assignment_ops_tokenization():
redValue &= 0x3;
}
+
+ // Testing SHIFT RIGHT (>>) operator on some condition
+ if (input.in_position.r == 0.25) {
+ uint redValue = asuint(output.out_color.r);
+ output.redValue ^= 0x1;
+ output.out_color.r = asfloat(redValue);
+ output.redValue |= 0x2;
+
+ // Applying shift left operation
+ output.redValue >> 1; // Shift left by 1
+ redValue |= 0x2;
+
+ redValue &= 0x3;
+ }
return output;
diff --git a/tests/test_backend/test_directx/test_parser.py b/tests/test_backend/test_directx/test_parser.py
index c4bbc89..809a182 100644
--- a/tests/test_backend/test_directx/test_parser.py
+++ b/tests/test_backend/test_directx/test_parser.py
@@ -201,6 +201,18 @@ def test_assignment_ops_parsing():
output.redValue << 1; // Shift left by 1
output.redValue &= 0x3;
}
+
+ // Testing SHIFT_RIGHT (>>) operator on some condition
+ if (input.in_position.r == 0.25) {
+ uint redValue = asuint(output.out_color.r);
+ output.redValue ^= 0x1;
+ output.out_color.r = asfloat(redValue);
+
+ output.redValue |= 0x2;
+ // Applying shift left operation
+ output.redValue >> 1; // Shift left by 1
+ output.redValue &= 0x3;
+ }
// Testing BITWISE_XOR (^) operator on some condition
if (input.in_position.r == 0.5) {
diff --git a/tests/test_translator/test_codegen/test_directx_codegen.py b/tests/test_translator/test_codegen/test_directx_codegen.py
index ca816a2..6779c1c 100644
--- a/tests/test_translator/test_codegen/test_directx_codegen.py
+++ b/tests/test_translator/test_codegen/test_directx_codegen.py
@@ -596,5 +596,38 @@ def test_bitwise_and_operator():
pytest.fail("Bitwise AND codegen not implemented")
+def test_double_data_type():
+ code = """
+ shader DoubleShader {
+ vertex {
+ input double position;
+ output double vDouble;
+
+ void main() {
+ vDouble = position * 2.0;
+ gl_Position = vec4(vDouble, 1.0);
+ }
+ }
+
+ fragment {
+ input double vDouble;
+ output vec4 fragColor;
+
+ void main() {
+ fragColor = vec4(vDouble, vDouble, vDouble, 1.0);
+ }
+ }
+ }
+ """
+ try:
+ tokens = tokenize_code(code)
+ ast = parse_code(tokens)
+ generated_code = generate_code(ast)
+ print(generated_code)
+ assert "double" in generated_code
+ except SyntaxError:
+ pytest.fail("Double data type not supported.")
+
+
if __name__ == "__main__":
pytest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 2
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/CrossGL/crosstl.git@c4325a413d735e2bd8a5e46ccff907c0fec13260#egg=crosstl
exceptiongroup==1.2.2
gast==0.6.0
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: crosstl
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- gast==0.6.0
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/crosstl
| [
"tests/test_backend/test_directx/test_codegen.py::test_assignment_ops_codegen",
"tests/test_backend/test_directx/test_parser.py::test_assignment_ops_parsing"
] | [
"tests/test_translator/test_codegen/test_directx_codegen.py::test_double_data_type"
] | [
"tests/test_backend/test_directx/test_codegen.py::test_struct_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_if_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_for_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_while_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_do_while_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_else_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_function_call_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_else_if_codegen",
"tests/test_backend/test_directx/test_codegen.py::test_bitwise_ops_codgen",
"tests/test_backend/test_directx/test_lexer.py::test_struct_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_if_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_for_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_else_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_function_call_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_else_if_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_assignment_ops_tokenization",
"tests/test_backend/test_directx/test_lexer.py::test_bitwise_or_tokenization",
"tests/test_backend/test_directx/test_parser.py::test_struct_parsing",
"tests/test_backend/test_directx/test_parser.py::test_if_parsing",
"tests/test_backend/test_directx/test_parser.py::test_for_parsing",
"tests/test_backend/test_directx/test_parser.py::test_while_parsing",
"tests/test_backend/test_directx/test_parser.py::test_do_while_parsing",
"tests/test_backend/test_directx/test_parser.py::test_else_parsing",
"tests/test_backend/test_directx/test_parser.py::test_function_call_parsing",
"tests/test_backend/test_directx/test_parser.py::test_else_if_parsing",
"tests/test_backend/test_directx/test_parser.py::test_bitwise_ops_parsing",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_struct",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_if_statement",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_for_statement",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_else_if_statement",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_function_call",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_assignment_or_operator",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_assignment_modulus_operator",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_assignment_xor_operator",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_assignment_shift_operators",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_bitwise_operators",
"tests/test_translator/test_codegen/test_directx_codegen.py::test_bitwise_and_operator"
] | [] | Apache License 2.0 | 20,484 | 555 | [
"crosstl/src/backend/DirectX/DirectxLexer.py",
"crosstl/src/backend/DirectX/DirectxParser.py"
] |
|
tskit-dev__tsdate-449 | 3c08becf500dac3a422376126e70314064c360fe | 2024-12-14 11:34:53 | a64a3238fc10ca98595c49d120f5cd2aee518ead | diff --git a/tsdate/core.py b/tsdate/core.py
index f6c796c..f4ecac1 100644
--- a/tsdate/core.py
+++ b/tsdate/core.py
@@ -87,11 +87,12 @@ class EstimationMethod:
allow_unary=None,
record_provenance=None,
constr_iterations=None,
+ set_metadata=None,
progress=None,
# Deprecated params
return_posteriors=None,
):
- # Set up all the generic params describe in the tsdate.date function, and define
+ # Set up all the generic params described in the tsdate.date function, and define
# priors if not passed-in already
if return_posteriors is not None:
raise ValueError(
@@ -106,6 +107,7 @@ class EstimationMethod:
self.recombination_rate = recombination_rate
self.return_fit = return_fit
self.return_likelihood = return_likelihood
+ self.set_metadata = set_metadata
self.pbar = progress
self.time_units = "generations" if time_units is None else time_units
if record_provenance is None:
@@ -206,7 +208,7 @@ class EstimationMethod:
# Add posterior mean and variance to node/mutation metadata
meta_timing = time.time()
self.set_time_metadata(
- nodes, node_mean_t, node_var_t, schemas.default_node_schema, overwrite=True
+ nodes, node_mean_t, node_var_t, schemas.default_node_schema
)
self.set_time_metadata(
mutations, mut_mean_t, mut_var_t, schemas.default_mutation_schema
@@ -226,40 +228,44 @@ class EstimationMethod:
)
return tables.tree_sequence()
- def set_time_metadata(self, table, mean, var, default_schema, overwrite=False):
- if var is not None:
+ def set_time_metadata(self, table, mean, var, default_schema):
+ # Try to set metadata: if we fail, clear metadata, reset schema, and try again
+ def _time_md_array(table, mean, var):
+ # Return an array of metadata dicts, or raise an error if
+ # schema is None or metadata is not valid
+ schema = table.metadata_schema
+ if schema.schema is None:
+ raise tskit.MetadataEncodingError("No schema set")
+ if len(table.metadata) > 0:
+ md_iter = (row.metadata for row in table)
+ else:
+ md_iter = ({} for _ in range(table.num_rows)) # no decoding needed
+ metadata_array = []
+ for metadata_dict, mn, vr in zip(md_iter, mean, var):
+ metadata_dict.update((("mn", mn), ("vr", vr)))
+ metadata_array.append(schema.validate_and_encode_row(metadata_dict))
+ return metadata_array
+
+ if self.set_metadata is False or var is None:
+ return # no md to set (e.g. outside maximization method)
+ assert len(mean) == len(var) == table.num_rows
+ try:
+ table.packset_metadata(_time_md_array(table, mean, var))
+ except (tskit.MetadataEncodingError, tskit.MetadataValidationError) as e:
table_name = type(table).__name__
- assert len(mean) == len(var) == table.num_rows
- if table.metadata_schema.schema is None or overwrite:
- if len(table.metadata) == 0 or overwrite:
- table.metadata_schema = default_schema
- md_iter = ({} for _ in range(table.num_rows))
- # For speed, assume we don't need to validate
- encoder = table.metadata_schema.encode_row
- logger.info(f"Set metadata schema on {table_name}")
- else:
+ if len(table.metadata) > 0 or table.metadata_schema.schema is not None:
+ if not self.set_metadata:
logger.warning(
- f"Could not set metadata on {table_name}: "
- "data already exists with no schema"
+ f"Could not set time metadata on {table_name} "
+ f"(force this by specifying `set_metadata=True`): {e}"
)
return
- else:
- md_iter = (
- table.metadata_schema.decode_row(md)
- for md in tskit.unpack_bytes(table.metadata, table.metadata_offset)
- )
- encoder = table.metadata_schema.validate_and_encode_row
- # TODO: could try to add to the existing schema if it's compatible
- metadata_array = []
- try:
- # wrap entire loop in try/except so metadata is either all set or not
- for metadata_dict, mn, vr in zip(md_iter, mean, var):
- metadata_dict.update((("mn", mn), ("vr", vr)))
- # validate and replace
- metadata_array.append(encoder(metadata_dict))
- table.packset_metadata(metadata_array)
- except tskit.MetadataValidationError as e:
- logger.warning(f"Could not set time metadata in {table_name}: {e}")
+ else:
+ logger.info(f"Clearing metadata from {table_name}")
+ table.drop_metadata()
+ logger.info(f"Setting metadata schema on {table_name}")
+ table.metadata_schema = default_schema
+ table.packset_metadata(_time_md_array(table, mean, var))
def parse_result(self, result, epsilon):
# Construct the tree sequence to return and add other stuff we might want to
@@ -876,6 +882,7 @@ def date(
time_units=None,
method=None,
constr_iterations=None,
+ set_metadata=None,
return_fit=None,
return_likelihood=None,
allow_unary=None,
@@ -922,12 +929,18 @@ def date(
:param string method: What estimation method to use. See
:data:`~tsdate.core.estimation_methods` for possible values.
If ``None`` (default) the "variational_gamma" method is currently chosen.
- :param bool return_fit: If ``True``, instead of returning just a dated tree
- sequence, return a tuple of ``(dated_ts, fit)``.
- Default: None, treated as False.
:param int constr_iterations: The maximum number of constrained least
squares iterations to use prior to forcing positive branch lengths.
Default: None, treated as 0.
+ :param bool set_metadata: Should unconstrained times be stored in table metadata,
+ in the form of ``"mn"`` (mean) and ``"vr"`` (variance) fields? If ``False``,
+ do not store metadata. If ``True``, force metadata to be set (if no schema
+ is set or the schema is incompatible, clear existing metadata in the relevant
+ tables and set a new schema). If ``None`` (default), only set metadata if
+ the existing schema allows (this may overwrite existing ``"mn"`` and ``"vr"``
+ fields) or if existing metadata is empty, otherwise issue a warning.
+ :param bool return_fit: If ``True``, instead of just a dated tree sequence,
+ return a tuple of ``(dated_ts, fit)``. Default: None, treated as False.
:param bool return_likelihood: If ``True``, return the log marginal likelihood
from the inside algorithm in addition to the dated tree sequence. If
``return_fit`` is also ``True``, then the marginal likelihood
@@ -963,6 +976,7 @@ def date(
return_fit=return_fit,
return_likelihood=return_likelihood,
allow_unary=allow_unary,
+ set_metadata=set_metadata,
record_provenance=record_provenance,
**kwargs,
)
| "set_metadata" parameter
Linked to #203 and https://github.com/tskit-dev/tsdate/issues/237#issuecomment-1438714926. At the moment, `tsdate`, as well as changing node times, also changes node metadata to add (unconstrained) means and variances. It would be useful to have the ability to (a) not add to metadata and (b) specify whether or not node metadata should be stomped on, if it exists.
Since this is the only metdata that tsdate touches, I suggest that as a first step, we could have a parameter `set_metadata`, `save_metadata`, `store_metadata` or even `force_metadata` with the following values:
* `False`: do not touch any metadata columns in any tables
* `True`: always add metadata to the node column, changing the schema and overwriting any existing node metadata
* `None` (default): try to append the existing node metadata, overwriting the tsdate-specific fields if they exist. A minor wrinkle is if there is binary node metadata (i.e. without a schema) and the metadata column is not empty (see https://github.com/tskit-dev/tsdate/issues/203#issuecomment-1438679651). In this case we won't know how to adjust the node metadata properly, so we should either raise an error or emit a warning. I suggest we should raise a warning, but it would be a pain to have run tsdate all the way through only to raise an error at the end, so we could check before running the main tsdate routine, and raise the error early?
| tskit-dev/tsdate | diff --git a/tests/test_accuracy.py b/tests/test_accuracy.py
index 8232475..57aeb09 100644
--- a/tests/test_accuracy.py
+++ b/tests/test_accuracy.py
@@ -109,11 +109,6 @@ class TestAccuracy:
mu = sim_mutations_parameters["rate"]
dts = tsdate.inside_outside(ts, population_size=Ne, mutation_rate=mu)
- # make sure we can read node metadata - old tsdate versions didn't set a schema
- if dts.table_metadata_schemas.node.schema is None:
- tables = dts.dump_tables()
- tables.nodes.metadata_schema = tskit.MetadataSchema.permissive_json()
- dts = tables.tree_sequence()
# Only test nonsample node times
nonsamples = np.ones(ts.num_nodes, dtype=bool)
diff --git a/tests/test_inference.py b/tests/test_inference.py
index cec220a..d96ece8 100644
--- a/tests/test_inference.py
+++ b/tests/test_inference.py
@@ -478,6 +478,13 @@ class TestVariational:
with pytest.raises(ValueError, match="Maximum number of EP iterations"):
tsdate.variational_gamma(self.ts, mutation_rate=5, max_iterations=-1)
+ def test_no_set_metadata(self):
+ assert len(self.ts.tables.mutations.metadata) == 0
+ assert len(self.ts.tables.nodes.metadata) == 0
+ ts = tsdate.variational_gamma(self.ts, mutation_rate=1e-8, set_metadata=False)
+ assert len(ts.tables.mutations.metadata) == 0
+ assert len(ts.tables.nodes.metadata) == 0
+
def test_no_existing_mutation_metadata(self):
# Currently only the variational_gamma method embeds mutation metadata
ts = tsdate.variational_gamma(self.ts, mutation_rate=1e-8)
@@ -565,3 +572,7 @@ class TestVariational:
dts = tsdate.variational_gamma(ts, mutation_rate=1e-8)
assert "Could not set" in caplog.text
assert len(dts.tables.mutations.metadata) == 0
+ assert len(dts.tables.nodes.metadata) > 0
+ dts = tsdate.variational_gamma(ts, mutation_rate=1e-8, set_metadata=True)
+ assert len(dts.tables.mutations.metadata) > 0
+ assert len(dts.tables.nodes.metadata) > 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
asciitree==0.3.3
attrs==25.3.0
backports.tarfile==1.2.0
build==1.2.2.post1
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
contourpy==1.3.0
coverage==7.8.0
cryptography==44.0.2
cycler==0.12.1
daiquiri==3.2.5.1
demes==0.2.3
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
fasteners==0.19
filelock==3.18.0
fonttools==4.57.0
humanize==4.12.2
id==1.5.0
identify==2.6.9
idna==3.10
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
keyring==25.6.0
kiwisolver==1.4.7
llvmlite==0.43.0
lmdb==1.6.2
markdown-it-py==3.0.0
matplotlib==3.9.4
mdurl==0.1.2
more-itertools==10.6.0
mpmath==1.3.0
msprime==1.3.3
newick==1.10.0
nh3==0.2.21
nodeenv==1.9.1
numba==0.60.0
numcodecs==0.12.1
numdifftools==0.9.41
numpy==2.0.2
packaging==24.2
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
psutil==7.0.0
pycparser==2.22
Pygments==2.19.1
pyparsing==3.2.3
pyproject_hooks==1.2.0
pytest==8.3.5
pytest-cov==6.1.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-json-logger==3.3.0
PyYAML==6.0.2
readme_renderer==44.0
referencing==0.36.2
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
rpds-py==0.24.0
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
ruff==0.11.3
scipy==1.13.1
SecretStorage==3.3.3
six==1.17.0
sortedcontainers==2.4.0
tomli==2.2.1
tqdm==4.67.1
-e git+https://github.com/tskit-dev/tsdate.git@3c08becf500dac3a422376126e70314064c360fe#egg=tsdate
tsinfer==0.4.1
tskit==0.6.2
twine==6.1.0
typing_extensions==4.13.1
urllib3==2.3.0
virtualenv==20.30.0
zarr==2.18.2
zipp==3.21.0
| name: tsdate
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- asciitree==0.3.3
- attrs==25.3.0
- backports-tarfile==1.2.0
- build==1.2.2.post1
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- contourpy==1.3.0
- coverage==7.8.0
- cryptography==44.0.2
- cycler==0.12.1
- daiquiri==3.2.5.1
- demes==0.2.3
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- fasteners==0.19
- filelock==3.18.0
- fonttools==4.57.0
- humanize==4.12.2
- id==1.5.0
- identify==2.6.9
- idna==3.10
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- keyring==25.6.0
- kiwisolver==1.4.7
- llvmlite==0.43.0
- lmdb==1.6.2
- markdown-it-py==3.0.0
- matplotlib==3.9.4
- mdurl==0.1.2
- more-itertools==10.6.0
- mpmath==1.3.0
- msprime==1.3.3
- newick==1.10.0
- nh3==0.2.21
- nodeenv==1.9.1
- numba==0.60.0
- numcodecs==0.12.1
- numdifftools==0.9.41
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- psutil==7.0.0
- pycparser==2.22
- pygments==2.19.1
- pyparsing==3.2.3
- pyproject-hooks==1.2.0
- pytest==8.3.5
- pytest-cov==6.1.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-json-logger==3.3.0
- pyyaml==6.0.2
- readme-renderer==44.0
- referencing==0.36.2
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- rpds-py==0.24.0
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- ruff==0.11.3
- scipy==1.13.1
- secretstorage==3.3.3
- six==1.17.0
- sortedcontainers==2.4.0
- tomli==2.2.1
- tqdm==4.67.1
- tsdate==0.2.2.dev36+g3c08bec
- tsinfer==0.4.1
- tskit==0.6.2
- twine==6.1.0
- typing-extensions==4.13.1
- urllib3==2.3.0
- virtualenv==20.30.0
- zarr==2.18.2
- zipp==3.21.0
prefix: /opt/conda/envs/tsdate
| [
"tests/test_inference.py::TestVariational::test_no_set_metadata",
"tests/test_inference.py::TestVariational::test_incompatible_schema_mutation_metadata"
] | [] | [
"tests/test_accuracy.py::TestAccuracy::test_basic[one_tree-0.98601-0.98601-0.97719-0.97719]",
"tests/test_accuracy.py::TestAccuracy::test_basic[few_trees-0.9822-0.9822-0.97744-0.97744]",
"tests/test_accuracy.py::TestAccuracy::test_basic[many_trees-0.93449-0.93449-0.964547-0.964547]",
"tests/test_accuracy.py::TestAccuracy::test_scaling[0.1]",
"tests/test_accuracy.py::TestAccuracy::test_scaling[1]",
"tests/test_accuracy.py::TestAccuracy::test_scaling[400]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-1.0-0.76]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.9-0.79]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.8-0.82]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.7-0.85]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.6-0.89]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.5-0.94]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.4-0.99]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.3-1.05]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.2-1.12]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[-0.1-1.21]",
"tests/test_accuracy.py::TestAccuracy::test_piecewise_scaling[0.0-1.32]",
"tests/test_inference.py::TestConstants::test_matches_tsinfer_consts",
"tests/test_inference.py::TestPrebuilt::test_invalid_method_failure",
"tests/test_inference.py::TestPrebuilt::test_no_mutations_failure",
"tests/test_inference.py::TestPrebuilt::test_no_population_size",
"tests/test_inference.py::TestPrebuilt::test_no_mutation",
"tests/test_inference.py::TestPrebuilt::test_not_needed_population_size",
"tests/test_inference.py::TestPrebuilt::test_bad_population_size",
"tests/test_inference.py::TestPrebuilt::test_both_ne_and_population_size_specified",
"tests/test_inference.py::TestPrebuilt::test_inside_outside_dangling_failure",
"tests/test_inference.py::TestPrebuilt::test_variational_gamma_dangling",
"tests/test_inference.py::TestPrebuilt::test_inside_outside_unary_failure",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts0-variational_gamma]",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts0-inside_outside]",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts0-maximization]",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts1-variational_gamma]",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts1-inside_outside]",
"tests/test_inference.py::TestPrebuilt::test_allow_unary[ts1-maximization]",
"tests/test_inference.py::TestPrebuilt::test_fails_with_recombination[None-logarithmic]",
"tests/test_inference.py::TestPrebuilt::test_fails_with_recombination[None-linear]",
"tests/test_inference.py::TestPrebuilt::test_fails_with_recombination[1-logarithmic]",
"tests/test_inference.py::TestPrebuilt::test_fails_with_recombination[1-linear]",
"tests/test_inference.py::TestPrebuilt::test_default_time_units",
"tests/test_inference.py::TestPrebuilt::test_default_alternative_time_units",
"tests/test_inference.py::TestPrebuilt::test_deprecated_return_posteriors",
"tests/test_inference.py::TestPrebuilt::test_return_fit",
"tests/test_inference.py::TestPrebuilt::test_no_maximization_posteriors",
"tests/test_inference.py::TestPrebuilt::test_discretised_posteriors",
"tests/test_inference.py::TestPrebuilt::test_variational_node_posteriors",
"tests/test_inference.py::TestPrebuilt::test_variational_mutation_posteriors",
"tests/test_inference.py::TestPrebuilt::test_variational_mean_edge_logconst",
"tests/test_inference.py::TestPrebuilt::test_marginal_likelihood",
"tests/test_inference.py::TestPrebuilt::test_intervals",
"tests/test_inference.py::TestSimulated::test_simple_sim[variational_gamma-0]",
"tests/test_inference.py::TestSimulated::test_simple_sim[variational_gamma-5]",
"tests/test_inference.py::TestSimulated::test_simple_sim[inside_outside-0]",
"tests/test_inference.py::TestSimulated::test_simple_sim[inside_outside-5]",
"tests/test_inference.py::TestSimulated::test_simple_sim[maximization-0]",
"tests/test_inference.py::TestSimulated::test_simple_sim[maximization-5]",
"tests/test_inference.py::TestSimulated::test_simple_sim_larger_example",
"tests/test_inference.py::TestSimulated::test_linear_space",
"tests/test_inference.py::TestSimulated::test_with_unary",
"tests/test_inference.py::TestSimulated::test_fails_multi_root",
"tests/test_inference.py::TestSimulated::test_non_contemporaneous",
"tests/test_inference.py::TestSimulated::test_mutation_times",
"tests/test_inference.py::TestInferred::test_simple_sim[variational_gamma-0]",
"tests/test_inference.py::TestInferred::test_simple_sim[variational_gamma-5]",
"tests/test_inference.py::TestInferred::test_simple_sim[inside_outside-0]",
"tests/test_inference.py::TestInferred::test_simple_sim[inside_outside-5]",
"tests/test_inference.py::TestInferred::test_simple_sim[maximization-0]",
"tests/test_inference.py::TestInferred::test_simple_sim[maximization-5]",
"tests/test_inference.py::TestVariational::test_binary",
"tests/test_inference.py::TestVariational::test_polytomy",
"tests/test_inference.py::TestVariational::test_inferred",
"tests/test_inference.py::TestVariational::test_bad_arguments",
"tests/test_inference.py::TestVariational::test_no_existing_mutation_metadata",
"tests/test_inference.py::TestVariational::test_existing_mutation_metadata",
"tests/test_inference.py::TestVariational::test_existing_byte_mutation_metadata",
"tests/test_inference.py::TestVariational::test_existing_struct_mutation_metadata"
] | [] | MIT License | 20,485 | 1,719 | [
"tsdate/core.py"
] |
|
streamlink__streamlink-6338 | fac5d8f740952485ce2c094e2826ed358fbe90fd | 2024-12-14 15:33:05 | 2bcdafc67824a60d65bce8bea65d019061c4e360 | diff --git a/src/streamlink/stream/dash/dash.py b/src/streamlink/stream/dash/dash.py
index e90df739..35e13520 100644
--- a/src/streamlink/stream/dash/dash.py
+++ b/src/streamlink/stream/dash/dash.py
@@ -9,7 +9,6 @@ from contextlib import contextmanager, suppress
from datetime import datetime
from time import time
from typing import Any
-from urllib.parse import urlparse, urlunparse
from requests import Response
@@ -253,11 +252,7 @@ class DASHStream(Stream):
manifest: str = res.text
url: str = res.url
- urlp = list(urlparse(url))
- urlp[2], _ = urlp[2].rsplit("/", 1)
- base_url: str = urlunparse(urlp)
-
- return manifest, dict(url=url, base_url=base_url)
+ return manifest, dict(url=url, base_url=url)
@staticmethod
def parse_mpd(manifest: str, mpd_params: Mapping[str, Any]) -> MPD:
| stream.dash: incorrect base_url (BaseURL) handling after fetching manifest
### Checklist
- [x] This is a bug report and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [x] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [x] [I have checked the list of open and recently closed bug reports](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22bug%22)
- [x] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Collaboration
- [x] [I will provide feedback should a pull request be opened with a fix for the problem](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#pull-request-feedback)
### Streamlink version
7.0.0+31.gfac5d8f7
### Description
## The Issue
Hi! Recently I'm testing MPEG-DASH with streamlink. After encountering a bunch of HTTP 404 errors when downloading segments, I've started to read the code, and then fortunately I think I've found the solution.
My command lines:
```console
$ ffmpeg -i test.mkv -c copy -f dash test.mpd
$ ffprobe test.mkv
ffprobe version n6.1.1-7-ga267d4ad4c-20240222 Copyright (c) 2007-2023 the FFmpeg developers
built with gcc 13.2.0 (crosstool-NG 1.25.0.232_c175b21)
configuration: ...
...
Input #0, matroska,webm, from 'test.mkv':
Metadata:
COMPATIBLE_BRANDS: isomiso2avc1mp41
MAJOR_BRAND : isom
MINOR_VERSION : 512
ENCODER : Lavf60.16.100
Duration: 00:05:00.17, start: 0.000000, bitrate: 1620 kb/s
Stream #0:0: Video: h264 (Main), yuv420p(progressive), 1920x1080, 30 fps, 30 tbr, 1k tbn (default)
Metadata:
HANDLER_NAME : VideoHandler
VENDOR_ID : [0][0][0][0]
DURATION : 00:05:00.166000000
Stream #0:1: Audio: aac (LC), 44100 Hz, stereo, fltp (default)
Metadata:
HANDLER_NAME : SoundHandler
VENDOR_ID : [0][0][0][0]
DURATION : 00:05:00.019000000
```
The MPD file:
<details>
<summary>the content of "test.mpd"</summary>
```xml
<?xml version="1.0" encoding="utf-8"?>
<MPD xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:mpeg:dash:schema:mpd:2011"
xmlns:xlink="http://www.w3.org/1999/xlink"
xsi:schemaLocation="urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd"
profiles="urn:mpeg:dash:profile:isoff-live:2011"
type="static"
mediaPresentationDuration="PT5M0.1S"
maxSegmentDuration="PT5.0S"
minBufferTime="PT12.0S">
<ProgramInformation>
</ProgramInformation>
<ServiceDescription id="0">
</ServiceDescription>
<Period id="0" start="PT0.0S">
<AdaptationSet id="0" contentType="video" startWithSAP="1" segmentAlignment="true" bitstreamSwitching="true" frameRate="30/1" maxWidth="1920" maxHeight="1080" par="16:9">
<Representation id="0" mimeType="video/mp4" codecs="avc1.4d4029" bandwidth="1428902" width="1920" height="1080" sar="1:1">
<SegmentTemplate timescale="10000000" initialization="init-stream$RepresentationID$.m4s" media="chunk-stream$RepresentationID$-$Number%05d$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="60000000" r="26" />
<S d="59990000" />
<S d="60000000" r="21" />
<S d="1670000" />
</SegmentTimeline>
</SegmentTemplate>
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" startWithSAP="1" segmentAlignment="true" bitstreamSwitching="true">
<Representation id="1" mimeType="audio/mp4" codecs="mp4a.40.2" bandwidth="194151" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2" />
<SegmentTemplate timescale="44100" initialization="init-stream$RepresentationID$.m4s" media="chunk-stream$RepresentationID$-$Number%05d$.m4s" startNumber="1">
<SegmentTimeline>
<S t="750" d="221174" />
<S d="221184" r="57" />
<S d="180224" />
</SegmentTimeline>
</SegmentTemplate>
</Representation>
</AdaptationSet>
</Period>
</MPD>
```
</details>
## My Solution
https://github.com/streamlink/streamlink/blob/fac5d8f740952485ce2c094e2826ed358fbe90fd/src/streamlink/stream/dash/dash.py#L256-L260
```diff
- urlp[2], _ = urlp[2].rsplit("/", 1)
+ urlp[2] = urlp[2].rsplit("/", 1)[0] + "/"
```
And also add a correspoding test:
```Python
def test_baseurl_urljoin(self):
with xml("dash/test_baseurl_urljoin.mpd") as mpd_xml:
mpd = MPD(mpd_xml, base_url="https://foo/bar", url="https://foo/bar/manifest.mpd")
# ... ^
# No trailing slash |
assert segment_urls == [
[
("empty-baseurl", "absolute-segments", "https://foo/absolute/init_video_5000kbps.m4s"),
("empty-baseurl", "absolute-segments", "https://foo/absolute/media_video_5000kbps-1.m4s"),
],
[
("empty-baseurl", "relative-segments", "https://foo/relative/init_video_5000kbps.m4s"),
("empty-baseurl", "relative-segments", "https://foo/relative/media_video_5000kbps-1.m4s"),
```
END.
### Debug log
```text
$ python -m streamlink --loglevel debug http://127.0.0.1:8080/mpeg-dash-test/vod/test.mpd best
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.9
[cli][debug] OpenSSL: OpenSSL 3.0.13 30 Jan 2024
[cli][debug] Streamlink: 7.0.0+31.gfac5d8f7
[cli][debug] Dependencies:
[cli][debug] certifi: 2024.6.2
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.4
[cli][debug] pycountry: 24.6.1
[cli][debug] pycryptodome: 3.20.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.32.3
[cli][debug] trio: 0.25.1
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.12.2
[cli][debug] urllib3: 2.2.1
[cli][debug] websocket-client: 1.8.0
[cli][debug] Arguments:
[cli][debug] url=http://127.0.0.1:8080/mpeg-dash-test/vod/test.mpd
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin dash for URL http://127.0.0.1:8080/mpeg-dash-test/vod/test.mpd
[plugins.dash][debug] URL=http://127.0.0.1:8080/mpeg-dash-test/vod/test.mpd; params={}
[utils.l10n][debug] Language code: en_US
[stream.dash][debug] Available languages for DASH audio streams: NONE (using: n/a)
[cli][info] Available streams: 1080p (worst, best)
[cli][info] Opening stream: 1080p (dash)
[cli][info] Starting player: C:\Program Files\VideoLAN\VLC\vlc.exe
[stream.dash][debug] Opening DASH reader for: ('0', '0', '0') - video/mp4
[stream.dash][debug] Opening DASH reader for: ('0', '1', '1') - audio/mp4
[stream.ffmpegmux][debug] ffmpeg version n6.1.1-7-ga267d4ad4c-20240222 Copyright (c) 2000-2023 the FFmpeg developers
built with gcc 13.2.0 (crosstool-NG 1.25.0.232_c175b21)
configuration: ...
...
[stream.dash.manifest][debug] Generating segment timeline for static playlist: ('0', '0', '0')
[stream.dash][debug] video/mp4 segment initialization: downloading (1970-01-01T00:00:00.000000Z / 2024-12-14T14:13:11.643145Z)
[utils.named_pipe][info] Creating pipe streamlinkpipe-247056-1-9449
[utils.named_pipe][info] Creating pipe streamlinkpipe-247056-2-3498
[stream.dash.manifest][debug] Generating segment timeline for static playlist: ('0', '1', '1')
[stream.ffmpegmux][debug] ffmpeg command: B:\dev\ffmpeg-n6.1.1-7-ga267d4ad4c-win64-gpl-6.1\bin\ffmpeg.EXE -y -nostats -loglevel info -i \\.\pipe\streamlinkpipe-247056-1-9449 -i \\.\pipe\streamlinkpipe-247056-2-3498 -c:v copy -c:a copy -copyts -f matroska pipe:1
[stream.dash][debug] audio/mp4 segment initialization: downloading (1970-01-01T00:00:00.000000Z / 2024-12-14T14:13:11.648832Z)
[stream.ffmpegmux][debug] Starting copy to pipe: \\.\pipe\streamlinkpipe-247056-1-9449
[stream.ffmpegmux][debug] Starting copy to pipe: \\.\pipe\streamlinkpipe-247056-2-3498
[cli][debug] Pre-buffering 8192 bytes
[stream.dash][error] video/mp4 segment initialization: failed (Unable to open URL: http://127.0.0.1:8080/mpeg-dash-test/init-stream0.m4s (404 Client Error: Not Found for url: http://127.0.0.1:8080/mpeg-dash-test/init-stream0.m4s))
[stream.dash][debug] video/mp4 segment 1: downloading (1970-01-01T00:00:00.000000Z / 2024-12-14T14:13:13.762230Z)
[stream.dash][error] audio/mp4 segment initialization: failed (Unable to open URL: http://127.0.0.1:8080/mpeg-dash-test/init-stream1.m4s (404 Client Error: Not Found for url: http://127.0.0.1:8080/mpeg-dash-test/init-stream1.m4s))
[stream.dash][debug] audio/mp4 segment 1: downloading (1970-01-01T00:00:00.000000Z / 2024-12-14T14:13:13.768768Z)
[stream.dash][error] video/mp4 segment 1: failed (Unable to open URL: http://127.0.0.1:8080/mpeg-dash-test/chunk-stream0-00001.m4s (404 Client Error: Not Found for url: http://127.0.0.1:8080/mpeg-dash-test/chunk-stream0-00001.m4s))
[stream.dash][debug] video/mp4 segment 2: downloading (1970-01-01T00:00:00.000000Z / 2024-12-14T14:13:15.883276Z)
[stream.dash][error] audio/mp4 segment 1: failed (Unable to open URL: http://127.0.0.1:8080/mpeg-dash-test/chunk-stream1-00001.m4s (404 Client Error: Not Found for url: http://127.0.0.1:8080/mpeg-dash-test/chunk-stream1-00001.m4s))
...
``` | streamlink/streamlink | diff --git a/tests/stream/dash/test_dash.py b/tests/stream/dash/test_dash.py
index db17ae46..e54f73ef 100644
--- a/tests/stream/dash/test_dash.py
+++ b/tests/stream/dash/test_dash.py
@@ -72,7 +72,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
def test_audio_only(self, session: Streamlink, mpd: Mock):
@@ -86,7 +86,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["a128k", "a256k"])
@pytest.mark.parametrize(
@@ -143,7 +143,7 @@ class TestDASHStreamParseManifest:
with_video_only=with_video_only,
with_audio_only=with_audio_only,
)
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert list(streams.keys()) == expected
def test_audio_single(self, session: Streamlink, mpd: Mock):
@@ -158,7 +158,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
def test_audio_multi(self, session: Streamlink, mpd: Mock):
@@ -174,7 +174,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p+a128k", "1080p+a128k", "720p+a256k", "1080p+a256k"])
def test_audio_multi_lang(self, session: Streamlink, mpd: Mock):
@@ -190,7 +190,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
assert getattr(streams["720p"].audio_representation, "lang", None) == "en"
assert getattr(streams["1080p"].audio_representation, "lang", None) == "en"
@@ -208,7 +208,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
assert getattr(streams["720p"].audio_representation, "lang", None) == "eng"
assert getattr(streams["1080p"].audio_representation, "lang", None) == "eng"
@@ -225,7 +225,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
assert getattr(streams["720p"].audio_representation, "lang", None) == "en_no_voice"
assert getattr(streams["1080p"].audio_representation, "lang", None) == "en_no_voice"
@@ -245,7 +245,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p"])
assert getattr(streams["720p"].audio_representation, "lang", None) == "es"
assert getattr(streams["1080p"].audio_representation, "lang", None) == "es"
@@ -264,7 +264,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert sorted(streams.keys()) == sorted(["720p", "1080p", "1080p_alt", "1080p_alt2"])
# Verify the fix for https://github.com/streamlink/streamlink/issues/4217
@@ -280,7 +280,7 @@ class TestDASHStreamParseManifest:
mpd.return_value = Mock(periods=[Mock(adaptationSets=[adaptationset])])
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert getattr(streams["1080p"].video_representation, "bandwidth", None) == pytest.approx(128.0)
assert getattr(streams["1080p_alt"].video_representation, "bandwidth", None) == pytest.approx(64.0)
assert getattr(streams["1080p_alt2"].video_representation, "bandwidth", None) == pytest.approx(32.0)
@@ -319,10 +319,10 @@ class TestDASHStreamParseManifest:
# This test currently achieves nothing... (manifest fixture added in 7aada92)
def test_segments_number_time(self, session: Streamlink, mpd: Mock):
with xml("dash/test_9.mpd") as mpd_xml:
- mpd.return_value = MPD(mpd_xml, base_url="http://test", url="http://test/manifest.mpd")
+ mpd.return_value = MPD(mpd_xml, base_url="http://test/manifest.mpd", url="http://test/manifest.mpd")
streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
- assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
+ assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test/manifest.mpd")]
assert list(streams.keys()) == ["480p"]
diff --git a/tests/stream/dash/test_manifest.py b/tests/stream/dash/test_manifest.py
index 77543a76..d82fd3ae 100644
--- a/tests/stream/dash/test_manifest.py
+++ b/tests/stream/dash/test_manifest.py
@@ -552,7 +552,55 @@ class TestMPDParser:
],
]
- def test_baseurl_urljoin(self):
+ def test_baseurl_urljoin_no_trailing_slash(self):
+ with xml("dash/test_baseurl_urljoin.mpd") as mpd_xml:
+ mpd = MPD(mpd_xml, base_url="https://foo/bar", url="https://test/manifest.mpd")
+
+ segment_urls = [
+ [
+ (period.id, adaptationset.id, segment.uri)
+ for segment in itertools.islice(representation.segments(), 2)
+ ]
+ for period in mpd.periods
+ for adaptationset in period.adaptationSets
+ for representation in adaptationset.representations
+ ] # fmt: skip
+ assert segment_urls == [
+ [
+ ("empty-baseurl", "absolute-segments", "https://foo/absolute/init_video_5000kbps.m4s"),
+ ("empty-baseurl", "absolute-segments", "https://foo/absolute/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("empty-baseurl", "relative-segments", "https://foo/relative/init_video_5000kbps.m4s"),
+ ("empty-baseurl", "relative-segments", "https://foo/relative/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("baseurl-with-scheme", "absolute-segments", "https://host/absolute/init_video_5000kbps.m4s"),
+ ("baseurl-with-scheme", "absolute-segments", "https://host/absolute/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("baseurl-with-scheme", "relative-segments", "https://host/path/relative/init_video_5000kbps.m4s"),
+ ("baseurl-with-scheme", "relative-segments", "https://host/path/relative/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("absolute-baseurl", "absolute-segments", "https://foo/absolute/init_video_5000kbps.m4s"),
+ ("absolute-baseurl", "absolute-segments", "https://foo/absolute/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("absolute-baseurl", "relative-segments", "https://foo/path/relative/init_video_5000kbps.m4s"),
+ ("absolute-baseurl", "relative-segments", "https://foo/path/relative/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("relative-baseurl", "absolute-segments", "https://foo/absolute/init_video_5000kbps.m4s"),
+ ("relative-baseurl", "absolute-segments", "https://foo/absolute/media_video_5000kbps-1.m4s"),
+ ],
+ [
+ ("relative-baseurl", "relative-segments", "https://foo/path/relative/init_video_5000kbps.m4s"),
+ ("relative-baseurl", "relative-segments", "https://foo/path/relative/media_video_5000kbps-1.m4s"),
+ ],
+ ]
+
+ def test_baseurl_urljoin_with_trailing_slash(self):
with xml("dash/test_baseurl_urljoin.mpd") as mpd_xml:
mpd = MPD(mpd_xml, base_url="https://foo/bar/", url="https://test/manifest.mpd")
@@ -600,6 +648,7 @@ class TestMPDParser:
],
]
+ def test_baseurl_urljoin_empty(self):
with xml("dash/test_baseurl_urljoin.mpd") as mpd_xml:
mpd = MPD(mpd_xml, base_url="", url="https://test/manifest.mpd")
@@ -647,7 +696,7 @@ class TestMPDParser:
],
]
- def test_nested_baseurls(self):
+ def test_baseurl_nested(self):
with xml("dash/test_baseurl_nested.mpd") as mpd_xml:
mpd = MPD(mpd_xml, base_url="https://foo/", url="https://test/manifest.mpd")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 7.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | async-generator==1.10
attrs==25.3.0
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
freezegun==1.5.1
h11==0.14.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
isodate==0.7.2
lxml==5.3.1
lxml-stubs==0.5.1
mypy==1.13.0
mypy-extensions==1.0.0
orjson==3.10.16
outcome==1.3.0.post0
packaging==24.2
pluggy==1.5.0
pycountry==24.6.1
pycryptodome==3.22.0
PySocks==1.7.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-trio==0.8.0
python-dateutil==2.9.0.post0
requests==2.32.3
requests-mock==1.12.1
ruff==0.8.3
six==1.17.0
sniffio==1.3.1
sortedcontainers==2.4.0
-e git+https://github.com/streamlink/streamlink.git@fac5d8f740952485ce2c094e2826ed358fbe90fd#egg=streamlink
tomli==2.2.1
trio==0.29.0
trio-typing==0.10.0
trio-websocket==0.12.2
types-freezegun==1.1.10
types-requests==2.32.0.20250328
types-setuptools==78.1.0.20250329
types-urllib3==1.26.25.14
typing_extensions==4.13.0
urllib3==2.3.0
versioningit==3.1.2
websocket-client==1.8.0
wsproto==1.2.0
zipp==3.21.0
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- async-generator==1.10
- attrs==25.3.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- freezegun==1.5.1
- h11==0.14.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- isodate==0.7.2
- lxml==5.3.1
- lxml-stubs==0.5.1
- mypy==1.13.0
- mypy-extensions==1.0.0
- orjson==3.10.16
- outcome==1.3.0.post0
- packaging==24.2
- pluggy==1.5.0
- pycountry==24.6.1
- pycryptodome==3.22.0
- pysocks==1.7.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-trio==0.8.0
- python-dateutil==2.9.0.post0
- requests==2.32.3
- requests-mock==1.12.1
- ruff==0.8.3
- six==1.17.0
- sniffio==1.3.1
- sortedcontainers==2.4.0
- streamlink==7.0.0+31.gfac5d8f7
- tomli==2.2.1
- trio==0.29.0
- trio-typing==0.10.0
- trio-websocket==0.12.2
- types-freezegun==1.1.10
- types-requests==2.32.0.20250328
- types-setuptools==78.1.0.20250329
- types-urllib3==1.26.25.14
- typing-extensions==4.13.0
- urllib3==2.3.0
- versioningit==3.1.2
- websocket-client==1.8.0
- wsproto==1.2.0
- zipp==3.21.0
prefix: /opt/conda/envs/streamlink
| [
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_video_only",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_only",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_with_videoaudio_only[Only",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_with_videoaudio_only[With",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_single",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_multi",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_multi_lang",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_multi_lang_alpha3",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_invalid_lang",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_audio_multi_lang_locale",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_duplicated_resolutions",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_duplicated_resolutions_sorted_bandwidth",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_segments_number_time"
] | [] | [
"tests/stream/dash/test_dash.py::test_logger_name",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_parse_fail[ParseError-None]",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_parse_fail[None-MPDParsingError]",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_contentprotection[ContentProtection",
"tests/stream/dash/test_dash.py::TestDASHStreamParseManifest::test_string",
"tests/stream/dash/test_dash.py::TestDASHStreamOpen::test_stream_open_video_only",
"tests/stream/dash/test_dash.py::TestDASHStreamOpen::test_stream_open_video_audio",
"tests/stream/dash/test_dash.py::TestDASHStreamWorker::test_dynamic_reload",
"tests/stream/dash/test_dash.py::TestDASHStreamWorker::test_static",
"tests/stream/dash/test_dash.py::TestDASHStreamWorker::test_static_refresh_wait[0]",
"tests/stream/dash/test_dash.py::TestDASHStreamWorker::test_static_refresh_wait[204.32]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata0-initialization]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata1-123]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata2-bar]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata3-123]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata4-bar]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata5-bar]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata6-baz.qux]",
"tests/stream/dash/test_manifest.py::TestSegment::test_name[segmentdata7-baz.qux]",
"tests/stream/dash/test_manifest.py::TestSegment::test_available_in[available_at0-97445.123456]",
"tests/stream/dash/test_manifest.py::TestSegment::test_available_in[available_at1-0.0]",
"tests/stream/dash/test_manifest.py::TestSegment::test_available_in[available_at2-0.0]",
"tests/stream/dash/test_manifest.py::TestSegment::test_availability",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_bool_str",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_type",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_duration",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_datetime",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_segment_template",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_frame_rate",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_timedelta",
"tests/stream/dash/test_manifest.py::TestMPDParsers::test_range",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_suggested_presentation_delay[minBufferTime",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_no_segment_list_or_template",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_number_time",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_static_number",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_dynamic_time",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_dynamic_number[Without",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_dynamic_number[With",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_static_no_publish_time",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_dynamic_no_publish_time_with_timeline[with-start]",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_dynamic_no_publish_time_with_timeline[without-start]",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segment_list",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_dynamic_segment_list_continued",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_dynamic_segment_list_no_duration",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_dynamic_timeline_continued",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_tsegment_t_is_none_1895",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_bitrate_rounded",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_duplicated_resolutions",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_static_periods_duration",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_segments_byterange",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_baseurl_urljoin_no_trailing_slash",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_baseurl_urljoin_with_trailing_slash",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_baseurl_urljoin_empty",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_baseurl_nested",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_timeline_ids",
"tests/stream/dash/test_manifest.py::TestMPDParser::test_get_representation"
] | [] | BSD 2-Clause "Simplified" License | 20,486 | 259 | [
"src/streamlink/stream/dash/dash.py"
] |
|
roboflow__supervision-1739 | 6f55d9de9e0f5469f11f768fb993de133f7d5af3 | 2024-12-14 20:05:15 | 6f55d9de9e0f5469f11f768fb993de133f7d5af3 | diff --git a/supervision/detection/utils.py b/supervision/detection/utils.py
index a2cbd87b..0d5ec475 100644
--- a/supervision/detection/utils.py
+++ b/supervision/detection/utils.py
@@ -720,25 +720,71 @@ def move_masks(
masks (npt.NDArray[np.bool_]): A 3D array of binary masks corresponding to the
predictions. Shape: `(N, H, W)`, where N is the number of predictions, and
H, W are the dimensions of each mask.
- offset (npt.NDArray[np.int32]): An array of shape `(2,)` containing non-negative
- int values `[dx, dy]`.
+ offset (npt.NDArray[np.int32]): An array of shape `(2,)` containing int values
+ `[dx, dy]`. Supports both positive and negative values for bidirectional
+ movement.
resolution_wh (Tuple[int, int]): The width and height of the desired mask
resolution.
Returns:
(npt.NDArray[np.bool_]) repositioned masks, optionally padded to the specified
shape.
- """
- if offset[0] < 0 or offset[1] < 0:
- raise ValueError(f"Offset values must be non-negative integers. Got: {offset}")
+ Examples:
+ ```python
+ import numpy as np
+ import supervision as sv
+ mask = np.array([[[False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False]]], dtype=bool)
+
+ offset = np.array([1, 1])
+ sv.move_masks(mask, offset, resolution_wh=(4, 4))
+ # array([[[False, False, False, False],
+ # [False, False, False, False],
+ # [False, False, True, True],
+ # [False, False, True, True]]], dtype=bool)
+
+ offset = np.array([-2, 2])
+ sv.move_masks(mask, offset, resolution_wh=(4, 4))
+ # array([[[False, False, False, False],
+ # [False, False, False, False],
+ # [False, False, False, False],
+ # [True, False, False, False]]], dtype=bool)
+ ```
+ """
mask_array = np.full((masks.shape[0], resolution_wh[1], resolution_wh[0]), False)
- mask_array[
- :,
- offset[1] : masks.shape[1] + offset[1],
- offset[0] : masks.shape[2] + offset[0],
- ] = masks
+
+ if offset[0] < 0:
+ source_x_start = -offset[0]
+ source_x_end = min(masks.shape[2], resolution_wh[0] - offset[0])
+ destination_x_start = 0
+ destination_x_end = min(resolution_wh[0], masks.shape[2] + offset[0])
+ else:
+ source_x_start = 0
+ source_x_end = min(masks.shape[2], resolution_wh[0] - offset[0])
+ destination_x_start = offset[0]
+ destination_x_end = offset[0] + source_x_end - source_x_start
+
+ if offset[1] < 0:
+ source_y_start = -offset[1]
+ source_y_end = min(masks.shape[1], resolution_wh[1] - offset[1])
+ destination_y_start = 0
+ destination_y_end = min(resolution_wh[1], masks.shape[1] + offset[1])
+ else:
+ source_y_start = 0
+ source_y_end = min(masks.shape[1], resolution_wh[1] - offset[1])
+ destination_y_start = offset[1]
+ destination_y_end = offset[1] + source_y_end - source_y_start
+
+ if source_x_end > source_x_start and source_y_end > source_y_start:
+ mask_array[
+ :,
+ destination_y_start:destination_y_end,
+ destination_x_start:destination_x_end,
+ ] = masks[:, source_y_start:source_y_end, source_x_start:source_x_end]
return mask_array
| `move_masks` only supports movement in positive direction
If you compare the code of `move_masks`, `move_detections` and `move_oriented_boxes`, you'll find that only mask code restricts offset direction:
```python
if offset[0] < 0 or offset[1] < 0:
raise ValueError(f"Offset values must be non-negative integers. Got: {offset}")
```
It should be possible to move masks in either direction, even if it results in cropping.
To complete this:
- [ ] Change the code so masks can be moved with negative offset
- [ ] Create a unit test suite for `move_masks`
---
It would help us immensely and speed up the review process if you could create a [Colab](https://colab.research.google.com/) showcasing the changes, but for this task it is optional. You may use the [Starter Template](https://colab.research.google.com/drive/1rin7WrS-UvVIe-_Gfxmu-yVslGphOq89?usp=sharing). | roboflow/supervision | diff --git a/test/detection/test_utils.py b/test/detection/test_utils.py
index 87e50f6a..d93c72c8 100644
--- a/test/detection/test_utils.py
+++ b/test/detection/test_utils.py
@@ -16,6 +16,7 @@ from supervision.detection.utils import (
merge_data,
merge_metadata,
move_boxes,
+ move_masks,
process_roboflow_result,
scale_boxes,
xcycwh_to_xyxy,
@@ -442,6 +443,268 @@ def test_move_boxes(
assert np.array_equal(result, expected_result)
[email protected](
+ "masks, offset, resolution_wh, expected_result, exception",
+ [
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([0, 0]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-1, -1]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, True, False, False],
+ [True, True, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, -2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-3, -3]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, -1]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, False, False, False],
+ [True, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-1, -2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [True, True, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([-2, 2]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [True, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([3, 3]),
+ (4, 4),
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ (
+ np.array(
+ [
+ [
+ [False, False, False, False],
+ [False, True, True, False],
+ [False, True, True, False],
+ [False, False, False, False],
+ ]
+ ],
+ dtype=bool,
+ ),
+ np.array([3, 3]),
+ (6, 6),
+ np.array(
+ [
+ [
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, False, False],
+ [False, False, False, False, True, True],
+ [False, False, False, False, True, True],
+ ]
+ ],
+ dtype=bool,
+ ),
+ DoesNotRaise(),
+ ),
+ ],
+)
+def test_move_masks(
+ masks: np.ndarray,
+ offset: np.ndarray,
+ resolution_wh: Tuple[int, int],
+ expected_result: np.ndarray,
+ exception: Exception,
+) -> None:
+ with exception:
+ result = move_masks(masks=masks, offset=offset, resolution_wh=resolution_wh)
+ np.testing.assert_array_equal(result, expected_result)
+
+
@pytest.mark.parametrize(
"xyxy, factor, expected_result, exception",
[
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.26 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "supervision",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | Bottleneck @ file:///croot/bottleneck_1731058641041/work
Brotli @ file:///croot/brotli-split_1736182456865/work
certifi @ file:///croot/certifi_1738623731865/work/certifi
charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work
contourpy @ file:///croot/contourpy_1738160616259/work
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
exceptiongroup==1.2.2
fonttools @ file:///croot/fonttools_1737039080035/work
idna @ file:///croot/idna_1714398848350/work
importlib_resources @ file:///croot/importlib_resources-suite_1720641103994/work
iniconfig==2.1.0
kiwisolver @ file:///croot/kiwisolver_1672387140495/work
matplotlib==3.9.2
numexpr @ file:///croot/numexpr_1730215937391/work
numpy @ file:///croot/numpy_and_numpy_base_1725470312869/work/dist/numpy-2.0.1-cp39-cp39-linux_x86_64.whl#sha256=b8c18bbfe185fbdff23024458e4b8ffbe2040e705abd5fb6cda1ef9d20b5974d
opencv-python==4.10.0
opencv-python-headless==4.10.0
packaging @ file:///croot/packaging_1734472117206/work
pandas @ file:///croot/pandas_1732735089971/work/dist/pandas-2.2.3-cp39-cp39-linux_x86_64.whl#sha256=0a51ed2e81ab863e3d00ed6c5049192ce578ecb38fb467d2f9a6585d3c25f666
pandas-stubs @ file:///croot/pandas-stubs_1676319782725/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1712154461189/work
pluggy==1.5.0
pyparsing @ file:///croot/pyparsing_1731445506121/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305812635/work
pytest==8.3.5
python-dateutil @ file:///croot/python-dateutil_1716495738603/work
pytz @ file:///croot/pytz_1713974312559/work
PyYAML @ file:///croot/pyyaml_1728657952215/work
requests @ file:///croot/requests_1730999120400/work
scipy @ file:///croot/scipy_1733756309941/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=3b247b926209f2d9f719ebae39faf3ff891b2596150ed8f8349adfc3eb19441c
six @ file:///tmp/build/80754af9/six_1644875935023/work
-e git+https://github.com/roboflow/supervision.git@6f55d9de9e0f5469f11f768fb993de133f7d5af3#egg=supervision
tomli==2.2.1
tqdm @ file:///croot/tqdm_1738943501192/work
types-pytz @ file:///croot/types-pytz_1665514243479/work
tzdata @ file:///croot/python-tzdata_1690578112552/work
unicodedata2 @ file:///croot/unicodedata2_1736541023050/work
urllib3 @ file:///croot/urllib3_1737133630106/work
zipp @ file:///croot/zipp_1732630741423/work
| name: supervision
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- aom=3.9.1=hac33072_0
- blas=2.131=openblas
- blas-devel=3.9.0=31_h1ea3ea9_openblas
- bottleneck=1.4.2=py39ha9d4c09_0
- brotli-python=1.0.9=py39h6a678d5_9
- bzip2=1.0.8=h5eee18b_6
- c-ares=1.19.1=h5eee18b_0
- ca-certificates=2025.2.25=h06a4308_0
- cairo=1.18.0=h3faef2a_0
- certifi=2025.1.31=py39h06a4308_0
- charset-normalizer=3.3.2=pyhd3eb1b0_0
- contourpy=1.2.1=py39hdb19cb5_1
- cycler=0.11.0=pyhd3eb1b0_0
- dav1d=1.2.1=h5eee18b_0
- defusedxml=0.7.1=pyhd3eb1b0_0
- expat=2.6.4=h6a678d5_0
- ffmpeg=6.1.1=gpl_he44c6f3_112
- font-ttf-dejavu-sans-mono=2.37=hd3eb1b0_0
- font-ttf-inconsolata=2.001=hcb22688_0
- font-ttf-source-code-pro=2.030=hd3eb1b0_0
- font-ttf-ubuntu=0.83=h8b1ccd4_0
- fontconfig=2.14.2=h14ed4e7_0
- fonts-anaconda=1=h8fa9717_0
- fonts-conda-ecosystem=1=hd3eb1b0_0
- fonttools=4.55.3=py39h5eee18b_0
- freeglut=3.4.0=h6a678d5_0
- freetype=2.12.1=h4a9f257_0
- fribidi=1.0.10=h7b6447c_0
- gettext=0.21.0=hedfda30_2
- gmp=6.3.0=h6a678d5_0
- gnutls=3.7.9=hb077bed_0
- graphite2=1.3.14=h295c915_1
- gst-plugins-base=1.22.3=h64f963a_1
- gst-plugins-good=1.22.3=h5681806_1
- gstreamer=1.22.3=hb6d4d65_1
- harfbuzz=8.5.0=hfac3d4d_0
- hdf5=1.14.3=nompi_hdf9ad27_105
- icu=73.2=h59595ed_0
- idna=3.7=py39h06a4308_0
- imath=3.1.11=hfc55251_0
- importlib_resources=6.4.0=py39h06a4308_0
- jasper=4.2.4=h536e39c_0
- kiwisolver=1.4.4=py39h6a678d5_0
- krb5=1.20.1=h143b758_1
- lame=3.100=h7b6447c_0
- lcms2=2.16=hb7c19ff_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libabseil=20240116.2=cxx17_h6a678d5_0
- libaec=1.1.3=h6a678d5_0
- libasprintf=0.23.1=h8e693c7_0
- libass=0.17.1=h8fe9dca_1
- libblas=3.9.0=31_h59b9bed_openblas
- libcblas=3.9.0=31_he106b2a_openblas
- libcurl=8.12.1=hc9e6f67_0
- libdeflate=1.20=hd590300_0
- libdrm=2.4.124=hb9d3cd8_0
- libedit=3.1.20230828=h5eee18b_0
- libev=4.33=h7f8727e_1
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.4=h6a678d5_1
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgettextpo=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.80.2=hf974151_0
- libglu=9.0.0=hf484d3e_1
- libgomp=14.2.0=h767d61c_2
- libiconv=1.18=h4ce23a2_1
- libidn2=2.3.4=h5eee18b_0
- libjpeg-turbo=3.0.3=h5eee18b_0
- liblapack=3.9.0=31_h7ac8fdf_openblas
- liblapacke=3.9.0=31_he2f377e_openblas
- libnghttp2=1.57.0=h2d74bed_0
- libogg=1.3.5=h27cfd23_1
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopencv=4.10.0=headless_py39h1857587_0
- libopenvino=2024.1.0=h2da1b83_7
- libopenvino-auto-batch-plugin=2024.1.0=hb045406_7
- libopenvino-auto-plugin=2024.1.0=hb045406_7
- libopenvino-hetero-plugin=2024.1.0=h5c03a75_7
- libopenvino-intel-cpu-plugin=2024.1.0=h2da1b83_7
- libopenvino-intel-gpu-plugin=2024.1.0=h2da1b83_7
- libopenvino-intel-npu-plugin=2024.1.0=he02047a_7
- libopenvino-ir-frontend=2024.1.0=h5c03a75_7
- libopenvino-onnx-frontend=2024.1.0=h07e8aee_7
- libopenvino-paddle-frontend=2024.1.0=h07e8aee_7
- libopenvino-pytorch-frontend=2024.1.0=he02047a_7
- libopenvino-tensorflow-frontend=2024.1.0=h39126c6_7
- libopenvino-tensorflow-lite-frontend=2024.1.0=he02047a_7
- libopus=1.3.1=h5eee18b_1
- libpciaccess=0.18=hd590300_0
- libpng=1.6.43=h2797004_0
- libprotobuf=4.25.3=he621ea3_0
- libssh2=1.11.1=h251f7ec_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtasn1=4.19.0=h5eee18b_0
- libtiff=4.6.0=h1dd3fc0_3
- libunistring=0.9.10=h27cfd23_0
- libuuid=2.38.1=h0b41bf4_0
- libva=2.21.0=h4ab18f5_2
- libvorbis=1.3.7=h7b6447c_0
- libvpx=1.14.1=hac33072_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.15=h7f8727e_0
- libxml2=2.13.5=hfdd30dd_0
- libzlib=1.2.13=h4ab18f5_6
- lz4-c=1.9.4=h6a678d5_1
- matplotlib-base=3.9.2=py39hbfdbfaf_1
- mpg123=1.30.0=h6a678d5_1000
- ncurses=6.4=h6a678d5_0
- nettle=3.9.1=h7ab15ed_0
- numexpr=2.10.1=py39hd28fd6d_0
- numpy=2.0.1=py39heeff2f4_1
- numpy-base=2.0.1=py39h8a23956_1
- ocl-icd=2.3.2=h5eee18b_1
- openblas=0.3.29=pthreads_h6ec200e_0
- opencv=4.10.0=headless_py39h4799dc9_0
- openexr=3.2.2=haf962dd_1
- openh264=2.4.1=h59595ed_0
- openjpeg=2.5.2=he7f1fd0_0
- openssl=3.4.1=h7b32b05_0
- p11-kit=0.24.1=hc5aa10d_0
- packaging=24.2=py39h06a4308_0
- pandas=2.2.3=py39h6a678d5_0
- pandas-stubs=1.5.3.230203=py39h06a4308_0
- pcre2=10.43=hcad00b1_0
- pillow=10.3.0=py39h90c7501_0
- pip=25.0=py39h06a4308_0
- pixman=0.44.2=h29eaf8c_0
- pugixml=1.14=h59595ed_0
- py-opencv=4.10.0=headless_py39h1c9db89_0
- pybind11-abi=4=hd3eb1b0_1
- pyparsing=3.2.0=py39h06a4308_0
- pysocks=1.7.1=py39h06a4308_0
- python=3.9.21=he870216_1
- python-dateutil=2.9.0post0=py39h06a4308_2
- python-tzdata=2023.3=pyhd3eb1b0_0
- python_abi=3.9=2_cp39
- pytz=2024.1=py39h06a4308_0
- pyyaml=6.0.2=py39h5eee18b_0
- readline=8.2=h5eee18b_0
- requests=2.32.3=py39h06a4308_1
- scipy=1.13.1=py39heeff2f4_1
- setuptools=75.8.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- snappy=1.2.1=h6a678d5_0
- sqlite=3.45.3=h5eee18b_0
- svt-av1=2.1.0=hac33072_0
- tbb=2022.0.0=hdb19cb5_0
- tk=8.6.14=h39e8969_0
- tqdm=4.67.1=py39h2f386ee_0
- types-pytz=2022.4.0.0=py39h06a4308_1
- tzdata=2025a=h04d1e81_0
- unicodedata2=15.1.0=py39h5eee18b_1
- urllib3=2.3.0=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xorg-fixesproto=5.0=hb9d3cd8_1003
- xorg-kbproto=1.0.7=hb9d3cd8_1003
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.9=h8ee46fc_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxrender=0.9.11=hd590300_0
- xorg-renderproto=0.11.1=hb9d3cd8_1003
- xorg-xextproto=7.3.0=hb9d3cd8_1004
- xorg-xproto=7.0.31=h27cfd23_1007
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zipp=3.21.0=py39h06a4308_0
- zlib=1.2.13=h4ab18f5_6
- zstd=1.5.6=hc292b87_0
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- pluggy==1.5.0
- pytest==8.3.5
- supervision==0.26.0rc1
- tomli==2.2.1
prefix: /opt/conda/envs/supervision
| [
"test/detection/test_utils.py::test_move_masks[masks1-offset1-resolution_wh1-expected_result1-exception1]",
"test/detection/test_utils.py::test_move_masks[masks2-offset2-resolution_wh2-expected_result2-exception2]",
"test/detection/test_utils.py::test_move_masks[masks3-offset3-resolution_wh3-expected_result3-exception3]",
"test/detection/test_utils.py::test_move_masks[masks4-offset4-resolution_wh4-expected_result4-exception4]",
"test/detection/test_utils.py::test_move_masks[masks5-offset5-resolution_wh5-expected_result5-exception5]",
"test/detection/test_utils.py::test_move_masks[masks6-offset6-resolution_wh6-expected_result6-exception6]",
"test/detection/test_utils.py::test_move_masks[masks7-offset7-resolution_wh7-expected_result7-exception7]",
"test/detection/test_utils.py::test_move_masks[masks8-offset8-resolution_wh8-expected_result8-exception8]"
] | [] | [
"test/detection/test_utils.py::test_clip_boxes[xyxy0-resolution_wh0-expected_result0]",
"test/detection/test_utils.py::test_clip_boxes[xyxy1-resolution_wh1-expected_result1]",
"test/detection/test_utils.py::test_clip_boxes[xyxy2-resolution_wh2-expected_result2]",
"test/detection/test_utils.py::test_clip_boxes[xyxy3-resolution_wh3-expected_result3]",
"test/detection/test_utils.py::test_clip_boxes[xyxy4-resolution_wh4-expected_result4]",
"test/detection/test_utils.py::test_clip_boxes[xyxy5-resolution_wh5-expected_result5]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons0-None-None-expected_result0-exception0]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons1-50-None-expected_result1-exception1]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons2-None-50-expected_result2-exception2]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons3-200-None-expected_result3-exception3]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons4-None-200-expected_result4-exception4]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons5-200-200-expected_result5-exception5]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons6-100-100-expected_result6-exception6]",
"test/detection/test_utils.py::test_filter_polygons_by_area[polygons7-400-400-expected_result7-exception7]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result0-expected_result0-exception0]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result1-expected_result1-exception1]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result2-expected_result2-exception2]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result3-expected_result3-exception3]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result4-expected_result4-exception4]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result5-expected_result5-exception5]",
"test/detection/test_utils.py::test_process_roboflow_result[roboflow_result6-expected_result6-exception6]",
"test/detection/test_utils.py::test_move_boxes[xyxy0-offset0-expected_result0-exception0]",
"test/detection/test_utils.py::test_move_boxes[xyxy1-offset1-expected_result1-exception1]",
"test/detection/test_utils.py::test_move_boxes[xyxy2-offset2-expected_result2-exception2]",
"test/detection/test_utils.py::test_move_boxes[xyxy3-offset3-expected_result3-exception3]",
"test/detection/test_utils.py::test_move_boxes[xyxy4-offset4-expected_result4-exception4]",
"test/detection/test_utils.py::test_move_masks[masks0-offset0-resolution_wh0-expected_result0-exception0]",
"test/detection/test_utils.py::test_scale_boxes[xyxy0-2.0-expected_result0-exception0]",
"test/detection/test_utils.py::test_scale_boxes[xyxy1-1.0-expected_result1-exception1]",
"test/detection/test_utils.py::test_scale_boxes[xyxy2-2.0-expected_result2-exception2]",
"test/detection/test_utils.py::test_scale_boxes[xyxy3-0.5-expected_result3-exception3]",
"test/detection/test_utils.py::test_scale_boxes[xyxy4-2.0-expected_result4-exception4]",
"test/detection/test_utils.py::test_calculate_masks_centroids[masks0-expected_result0-exception0]",
"test/detection/test_utils.py::test_calculate_masks_centroids[masks1-expected_result1-exception1]",
"test/detection/test_utils.py::test_calculate_masks_centroids[masks2-expected_result2-exception2]",
"test/detection/test_utils.py::test_calculate_masks_centroids[masks3-expected_result3-exception3]",
"test/detection/test_utils.py::test_calculate_masks_centroids[masks4-expected_result4-exception4]",
"test/detection/test_utils.py::test_merge_data[data_list0-expected_result0-exception0]",
"test/detection/test_utils.py::test_merge_data[data_list1-expected_result1-exception1]",
"test/detection/test_utils.py::test_merge_data[data_list2-expected_result2-exception2]",
"test/detection/test_utils.py::test_merge_data[data_list3-expected_result3-exception3]",
"test/detection/test_utils.py::test_merge_data[data_list4-expected_result4-exception4]",
"test/detection/test_utils.py::test_merge_data[data_list5-expected_result5-exception5]",
"test/detection/test_utils.py::test_merge_data[data_list6-expected_result6-exception6]",
"test/detection/test_utils.py::test_merge_data[data_list7-expected_result7-exception7]",
"test/detection/test_utils.py::test_merge_data[data_list8-expected_result8-exception8]",
"test/detection/test_utils.py::test_merge_data[data_list9-expected_result9-exception9]",
"test/detection/test_utils.py::test_merge_data[data_list10-expected_result10-exception10]",
"test/detection/test_utils.py::test_merge_data[data_list11-None-exception11]",
"test/detection/test_utils.py::test_merge_data[data_list12-expected_result12-exception12]",
"test/detection/test_utils.py::test_merge_data[data_list13-expected_result13-exception13]",
"test/detection/test_utils.py::test_merge_data[data_list14-expected_result14-exception14]",
"test/detection/test_utils.py::test_merge_data[data_list15-expected_result15-exception15]",
"test/detection/test_utils.py::test_merge_data[data_list16-None-exception16]",
"test/detection/test_utils.py::test_merge_data[data_list17-None-exception17]",
"test/detection/test_utils.py::test_merge_data[data_list18-None-exception18]",
"test/detection/test_utils.py::test_merge_data[data_list19-expected_result19-exception19]",
"test/detection/test_utils.py::test_merge_data[data_list20-None-exception20]",
"test/detection/test_utils.py::test_merge_data[data_list21-None-exception21]",
"test/detection/test_utils.py::test_merge_data[data_list22-None-exception22]",
"test/detection/test_utils.py::test_merge_data[data_list23-None-exception23]",
"test/detection/test_utils.py::test_get_data_item[data0-0-expected_result0-exception0]",
"test/detection/test_utils.py::test_get_data_item[data1-0-expected_result1-exception1]",
"test/detection/test_utils.py::test_get_data_item[data2-0-expected_result2-exception2]",
"test/detection/test_utils.py::test_get_data_item[data3-index3-expected_result3-exception3]",
"test/detection/test_utils.py::test_get_data_item[data4-index4-expected_result4-exception4]",
"test/detection/test_utils.py::test_get_data_item[data5--1-expected_result5-exception5]",
"test/detection/test_utils.py::test_get_data_item[data6--1-expected_result6-exception6]",
"test/detection/test_utils.py::test_get_data_item[data7-index7-expected_result7-exception7]",
"test/detection/test_utils.py::test_get_data_item[data8-index8-expected_result8-exception8]",
"test/detection/test_utils.py::test_get_data_item[data9-index9-expected_result9-exception9]",
"test/detection/test_utils.py::test_get_data_item[data10-index10-expected_result10-exception10]",
"test/detection/test_utils.py::test_get_data_item[data11-index11-expected_result11-exception11]",
"test/detection/test_utils.py::test_get_data_item[data12-index12-expected_result12-exception12]",
"test/detection/test_utils.py::test_get_data_item[data13-index13-expected_result13-exception13]",
"test/detection/test_utils.py::test_get_data_item[data14-0-expected_result14-exception14]",
"test/detection/test_utils.py::test_get_data_item[data15--1-expected_result15-exception15]",
"test/detection/test_utils.py::test_get_data_item[data16-index16-expected_result16-exception16]",
"test/detection/test_utils.py::test_contains_holes[mask0-False-exception0]",
"test/detection/test_utils.py::test_contains_holes[mask1-False-exception1]",
"test/detection/test_utils.py::test_contains_holes[mask2-False-exception2]",
"test/detection/test_utils.py::test_contains_holes[mask3-False-exception3]",
"test/detection/test_utils.py::test_contains_holes[mask4-True-exception4]",
"test/detection/test_utils.py::test_contains_holes[mask5-True-exception5]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask0-4-False-exception0]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask1-4-True-exception1]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask2-4-False-exception2]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask3-4-False-exception3]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask4-4-False-exception4]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask5-4-True-exception5]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask6-8-False-exception6]",
"test/detection/test_utils.py::test_contains_multiple_segments[mask7-5-None-exception7]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh0-expected_result0]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh1-expected_result1]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh2-expected_result2]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh3-expected_result3]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh4-expected_result4]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh5-expected_result5]",
"test/detection/test_utils.py::test_xywh_to_xyxy[xywh6-expected_result6]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh0-expected_result0]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh1-expected_result1]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh2-expected_result2]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh3-expected_result3]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh4-expected_result4]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh5-expected_result5]",
"test/detection/test_utils.py::test_xcycwh_to_xyxy[xcycwh6-expected_result6]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list0-expected_result0-exception0]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list1-expected_result1-exception1]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list2-None-exception2]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list3-None-exception3]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list4-expected_result4-exception4]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list5-expected_result5-exception5]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list6-expected_result6-exception6]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list7-expected_result7-exception7]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list8-expected_result8-exception8]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list9-expected_result9-exception9]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list10-expected_result10-exception10]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list11-expected_result11-exception11]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list12-expected_result12-exception12]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list13-None-exception13]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list14-None-exception14]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list15-None-exception15]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list16-None-exception16]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list17-expected_result17-exception17]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list18-expected_result18-exception18]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list19-None-exception19]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list20-None-exception20]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list21-None-exception21]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list22-expected_result22-exception22]",
"test/detection/test_utils.py::test_merge_metadata[metadata_list23-None-exception23]"
] | [] | MIT License | 20,488 | 1,023 | [
"supervision/detection/utils.py"
] |
|
openforcefield__openff-nagl-165 | 92d3302570664bec4b4719d49bfa8f68ef4090df | 2024-12-16 08:51:08 | 350dcbd3a57334c7c82f5189081262a44bbe68f5 | diff --git a/openff/nagl/lookups.py b/openff/nagl/lookups.py
index 229a35e..9c7412b 100644
--- a/openff/nagl/lookups.py
+++ b/openff/nagl/lookups.py
@@ -137,8 +137,12 @@ class AtomPropertiesLookupTable(BaseLookupTable):
If the property value cannot be found for this molecule
"""
from openff.toolkit.topology import Molecule
+ from openff.toolkit.utils.exceptions import EmptyInChiError
- inchi_key = molecule.to_inchi(fixed_hydrogens=True)
+ try:
+ inchi_key = molecule.to_inchi(fixed_hydrogens=True)
+ except EmptyInChiError as e:
+ raise KeyError(e.msg)
try:
entry = self.properties[inchi_key]
except KeyError:
| Cannot assign partial charges to molecules with 1024+ atoms
## Expected behavior ##
<!-- A clear and concise description of what you want to do and what you think should happen. (Code to reproduce the behavior can be added below). -->
I'm not sure exactly what I expected for a maximum molecule size, but I would guess the network can handle molecules much larger than 1000 molecules.
## Actual behavior ##
<!-- What happened instead. Add as much detail as you can. Include (copy and paste) stack traces and any output. -->
As a non-expert of this codebase, it looks like this error stems from checking a lookup table which is keyed by InChI. The toolkit cannot create InChI of molecules more than 1034 atoms long. There might be other errors later in the process, I have yet to dig into this.
https://github.com/openforcefield/openff-toolkit/issues/1977
## Code to reproduce the behavior ##
<!-- Show us how to reproduce the failure. If you can, use trajectory files from the test data. Use the code snipped below as a starting point. -->
``` python
import time
import numpy
import openff.nagl
import openff.nagl_models
from openff.toolkit import Molecule
from openff.toolkit.utils.nagl_wrapper import NAGLToolkitWrapper
from openff.toolkit.utils.exceptions import EmptyInChiError
print(f"{openff.nagl.__version__=}")
print(f"{openff.nagl_models.__version__=}")
molecule = Molecule.from_smiles(341 * "C")
molecule.assign_partial_charges(
partial_charge_method="openff-gnn-am1bcc-0.1.0-rc.3.pt",
toolkit_registry=NAGLToolkitWrapper(),
)
```
Holy traceback, batman!
```python
openff.nagl.__version__='0.5.0'
openff.nagl_models.__version__='0.3.0'
Warning: OECreateInChI: InChI only supports molecules with between 1 and 1023 atoms! (note: large molecule support is experimental)
---------------------------------------------------------------------------
EmptyInChiError Traceback (most recent call last)
Cell In[1], line 17
12 print(f"{openff.nagl_models.__version__=}")
15 molecule = Molecule.from_smiles(341 * "C")
---> 17 molecule.assign_partial_charges(
18 partial_charge_method="openff-gnn-am1bcc-0.1.0-rc.3.pt",
19 toolkit_registry=NAGLToolkitWrapper(),
20 )
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/topology/molecule.py:2687, in FrozenMolecule.assign_partial_charges(self, partial_charge_method, strict_n_conformers, use_conformers, toolkit_registry, normalize_partial_charges)
2685 elif isinstance(toolkit_registry, ToolkitWrapper):
2686 toolkit_wrapper: ToolkitWrapper = toolkit_registry
-> 2687 toolkit_wrapper.assign_partial_charges( # type: ignore[attr-defined]
2688 self,
2689 partial_charge_method=partial_charge_method,
2690 use_conformers=use_conformers,
2691 strict_n_conformers=strict_n_conformers,
2692 normalize_partial_charges=normalize_partial_charges,
2693 _cls=self.__class__,
2694 )
2695 else:
2696 raise InvalidToolkitRegistryError(
2697 f"Invalid toolkit_registry passed to assign_partial_charges."
2698 f"Expected ToolkitRegistry or ToolkitWrapper. Got {type(toolkit_registry)}"
2699 )
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/utils/nagl_wrapper.py:142, in NAGLToolkitWrapper.assign_partial_charges(self, molecule, partial_charge_method, use_conformers, strict_n_conformers, normalize_partial_charges, _cls)
136 raise ChargeMethodUnavailableError(
137 f"Charge model {partial_charge_method} not supported by "
138 f"{self.__class__.__name__}."
139 ) from error
141 model = GNNModel.load(model_path, eval_mode=True)
--> 142 charges = model.compute_property(
143 molecule,
144 as_numpy=True,
145 readout_name="am1bcc_charges",
146 check_domains=True,
147 error_if_unsupported=True,
148 )
150 molecule.partial_charges = Quantity(
151 charges.astype(float),
152 unit.elementary_charge,
153 )
155 if normalize_partial_charges:
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/nn/_models.py:402, in GNNModel.compute_property(self, molecule, readout_name, as_numpy, check_domains, error_if_unsupported, check_lookup_table)
362 def compute_property(
363 self,
364 molecule: "Molecule",
(...)
369 check_lookup_table: bool = True
370 ):
371 """
372 Compute the trained property for a molecule.
373
(...)
400 result: torch.Tensor or numpy.ndarray
401 """
--> 402 properties = self.compute_properties(
403 molecule=molecule,
404 as_numpy=as_numpy,
405 check_domains=check_domains,
406 error_if_unsupported=error_if_unsupported,
407 check_lookup_table=check_lookup_table
408 )
409 if readout_name is None:
410 if len(properties) == 1:
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/nn/_models.py:201, in GNNModel.compute_properties(self, molecule, as_numpy, check_domains, error_if_unsupported, check_lookup_table)
197 fragments, all_indices = split_up_molecule(molecule)
198 # TODO: this assumes atom-wise properties
199 # we should add support for bond-wise/more general properties
--> 201 results = [
202 self._compute_properties(
203 fragment,
204 as_numpy=as_numpy,
205 check_domains=check_domains,
206 error_if_unsupported=error_if_unsupported,
207 check_lookup_table=check_lookup_table,
208 )
209 for fragment in fragments
210 ]
212 # combine the results
213 combined_results = {}
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/nn/_models.py:202, in <listcomp>(.0)
197 fragments, all_indices = split_up_molecule(molecule)
198 # TODO: this assumes atom-wise properties
199 # we should add support for bond-wise/more general properties
201 results = [
--> 202 self._compute_properties(
203 fragment,
204 as_numpy=as_numpy,
205 check_domains=check_domains,
206 error_if_unsupported=error_if_unsupported,
207 check_lookup_table=check_lookup_table,
208 )
209 for fragment in fragments
210 ]
212 # combine the results
213 combined_results = {}
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/nn/_models.py:289, in GNNModel._compute_properties(self, molecule, as_numpy, check_domains, error_if_unsupported, check_lookup_table)
287 for property_name in expected_value_keys:
288 try:
--> 289 value = self._check_property_lookup_table(
290 molecule=molecule,
291 readout_name=property_name,
292 )
293 except KeyError as e:
294 logger.info(
295 f"Could not find property in lookup table: {e}"
296 )
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/nn/_models.py:359, in GNNModel._check_property_lookup_table(self, molecule, readout_name)
336 """
337 Check if the molecule is in the property lookup table.
338
(...)
355 if the molecule is not in the property lookup table
356 """
358 table = self.lookup_tables[readout_name]
--> 359 return table.lookup(molecule)
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/nagl/lookups.py:141, in AtomPropertiesLookupTable.lookup(self, molecule)
121 """
122 Look up the property value for a molecule
123
(...)
137 If the property value cannot be found for this molecule
138 """
139 from openff.toolkit.topology import Molecule
--> 141 inchi_key = molecule.to_inchi(fixed_hydrogens=True)
142 try:
143 entry = self.properties[inchi_key]
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/topology/molecule.py:1760, in FrozenMolecule.to_inchi(self, fixed_hydrogens, toolkit_registry)
1732 """
1733 Create an InChI string for the molecule using the requested toolkit backend.
1734 InChI is a standardised representation that does not capture tautomers unless specified using the fixed
(...)
1756 If an invalid object is passed as the toolkit_registry parameter
1757 """
1759 if isinstance(toolkit_registry, ToolkitRegistry):
-> 1760 inchi = toolkit_registry.call(
1761 "to_inchi", self, fixed_hydrogens=fixed_hydrogens
1762 )
1763 elif isinstance(toolkit_registry, ToolkitWrapper):
1764 toolkit = toolkit_registry
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/utils/toolkit_registry.py:266, in ToolkitRegistry.call(self, method_name, raise_exception_types, *args, **kwargs)
264 for exception_type in raise_exception_types:
265 if isinstance(e, exception_type):
--> 266 raise e
267 errors.append((toolkit, e))
269 # No toolkit was found to provide the requested capability
270 # TODO: Can we help developers by providing a check for typos in expected method names?
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/utils/toolkit_registry.py:262, in ToolkitRegistry.call(self, method_name, raise_exception_types, *args, **kwargs)
260 method = getattr(toolkit, method_name)
261 try:
--> 262 return method(*args, **kwargs)
263 except Exception as e:
264 for exception_type in raise_exception_types:
File ~/micromamba/envs/openff-interchange-env/lib/python3.11/site-packages/openff/toolkit/utils/openeye_wrapper.py:1942, in OpenEyeToolkitWrapper.to_inchi(self, molecule, fixed_hydrogens)
1939 inchi = oechem.OEMolToSTDInChI(oemol)
1941 if len(inchi) == 0:
-> 1942 raise EmptyInChiError(
1943 "OEChem failed to generate an InChI for the molecule."
1944 )
1946 return inchi
EmptyInChiError: OEChem failed to generate an InChI for the molecule.
```
## Current environment ##
- Which version are you using? (run `python -c "import openff.nagl; print(openff.nagl.__version__)"`) 0.5.0
- Which version of Python (`python -V`)? 3.11
- Which operating system? macOS 15.1.1
- What is the output of `pip list`?
- If you use conda, what is the output of `conda list`? See linked toolkit issue
| openforcefield/openff-nagl | diff --git a/openff/nagl/tests/nn/test_model.py b/openff/nagl/tests/nn/test_model.py
index 5486b27..f6f4b83 100644
--- a/openff/nagl/tests/nn/test_model.py
+++ b/openff/nagl/tests/nn/test_model.py
@@ -419,6 +419,11 @@ class TestGNNModel:
[-0.738375, 0.246125, 0.246125, 0.246125],
atol=1e-5
)
+
+ def test_compute_long_molecule(self, am1bcc_model):
+ mol = Molecule.from_smiles(341 * "C")
+ charges = am1bcc_model.compute_property(mol, as_numpy=True)
+ assert charges.shape == (mol.n_atoms,)
class TestChargeGNNModelRC3(BaseTestChargeGNNModel):
model_name = "openff-gnn-am1bcc-0.1.0-rc.3"
diff --git a/openff/nagl/tests/test_lookups.py b/openff/nagl/tests/test_lookups.py
index 676699f..35fe030 100644
--- a/openff/nagl/tests/test_lookups.py
+++ b/openff/nagl/tests/test_lookups.py
@@ -97,3 +97,8 @@ class TestAtomPropertiesLookupTable:
mol = Molecule.from_mapped_smiles("[H:5][C:1]([H:6])([H:7])[N+2:2](-[O-:3])[O-:4]")
properties = lookup_table.lookup(mol)
assert_allclose(properties.numpy(), np.array([-0.103, 0.234, -0.209, -0.209, 0.096, 0.096, 0.096]))
+
+ def test_lookup_long(self, lookup_table):
+ mol = Molecule.from_smiles(341 * "C")
+ with pytest.raises(KeyError, match="failed to generate an InChI"):
+ lookup_table.lookup(mol)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": [
"devtools/conda-envs/base.yaml"
],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | annotated-types @ file:///home/conda/feedstock_root/build_artifacts/annotated-types_1733247046149/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work
bson @ file:///home/conda/feedstock_root/build_artifacts/bson_1736456155729/work
cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
cachetools @ file:///home/conda/feedstock_root/build_artifacts/cachetools_1740094013202/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1725560520483/work
chardet @ file:///home/conda/feedstock_root/build_artifacts/chardet_1741797914774/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work
click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work
click-option-group @ file:///home/conda/feedstock_root/build_artifacts/click-option-group_1686394190925/work
codecov @ file:///home/conda/feedstock_root/build_artifacts/codecov_1734975286850/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1731428322366/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1743381215370/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1733332471406/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
execnet @ file:///home/conda/feedstock_root/build_artifacts/execnet_1733230988954/work
filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1741969488311/work
flexcache @ file:///home/conda/feedstock_root/build_artifacts/flexcache_1733663531877/work
flexparser @ file:///home/conda/feedstock_root/build_artifacts/flexparser_1733663506167/work
fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1738940303262/work
freetype-py @ file:///home/conda/feedstock_root/build_artifacts/freetype-py_1650983368720/work
fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1743361113926/work
gmpy2 @ file:///home/conda/feedstock_root/build_artifacts/gmpy2_1733462536562/work
greenlet @ file:///home/conda/feedstock_root/build_artifacts/greenlet_1734532786161/work
h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1738578511449/work
hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1737618293087/work
hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1737618333194/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work
importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1736252299705/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1725459213453/work
lightning-utilities @ file:///home/conda/feedstock_root/build_artifacts/lightning-utilities_1742509992526/work
markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1733250460757/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work
matplotlib==3.10.1
mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1733255585584/work
mpmath @ file:///home/conda/feedstock_root/build_artifacts/mpmath_1733302684489/work
munkres==1.1.4
networkx @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_networkx_1731521053/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1742254824252/work/dist/numpy-2.2.4-cp310-cp310-linux_x86_64.whl#sha256=89bf69a84640f36d25198a479bdb3885a47026a4604a6e700781b7b89b023bd8
openff-amber-ff-ports @ file:///home/conda/feedstock_root/build_artifacts/openff-amber-ff-ports_1699301684704/work
-e git+https://github.com/openforcefield/openff-nagl.git@92d3302570664bec4b4719d49bfa8f68ef4090df#egg=openff_nagl
openff-toolkit @ file:///home/conda/feedstock_root/build_artifacts/openff-toolkit-split_1741275622781/work
openff-units @ file:///home/conda/feedstock_root/build_artifacts/openff-units_1740585382576/work
openff-utilities @ file:///home/conda/feedstock_root/build_artifacts/openff-utilities_1738856470282/work
openforcefields @ file:///home/conda/feedstock_root/build_artifacts/openff-forcefields_1726151618592/work
optree @ file:///home/conda/feedstock_root/build_artifacts/optree_1741963800265/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1726878398774/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1735929693232/work
Pint @ file:///home/conda/feedstock_root/build_artifacts/pint_1733663494434/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_platformdirs_1742485085/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work
pybind11 @ file:///D:/bld/pybind11-split_1730237329882/work
pybind11_global @ file:///home/conda/feedstock_root/build_artifacts/pybind11-split_1730237328107/work
pycairo==1.27.0
pycparser @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pycparser_1733195786/work
pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1737761369378/work
pydantic_core @ file:///home/conda/feedstock_root/build_artifacts/pydantic-core_1734571487232/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1736243443484/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1743089729650/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work
pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1733223023082/work
pytest-xdist @ file:///home/conda/feedstock_root/build_artifacts/pytest-xdist_1733240780199/work
python-constraint @ file:///home/conda/feedstock_root/build_artifacts/python-constraint_1734362967806/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work
pytorch-lightning @ file:///home/conda/feedstock_root/build_artifacts/pytorch-lightning_1742579345300/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1706886791323/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work
rdkit @ file:///home/conda/feedstock_root/build_artifacts/rdkit-meta_1740757845765/work
reportlab @ file:///home/conda/feedstock_root/build_artifacts/reportlab_1739375622146/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work
rich @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rich_1743371105/work/dist
rlPyCairo @ file:///home/conda/feedstock_root/build_artifacts/rlpycairo_1687519531733/work
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy-split_1739790642651/work/dist/scipy-1.15.2-cp310-cp310-linux_x86_64.whl#sha256=9e52bad6c3294d1a5b04a13632118ca2157130603c6c018c2d710162b223b27e
six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work
smirnoff99frosst @ file:///home/conda/feedstock_root/build_artifacts/smirnoff99frosst_1610626268593/work
SQLAlchemy @ file:///home/conda/feedstock_root/build_artifacts/sqlalchemy_1743109639410/work
sympy @ file:///home/conda/feedstock_root/build_artifacts/sympy_1736248176451/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work
torch @ file:///home/conda/feedstock_root/build_artifacts/libtorch_1742916229386/work
torchmetrics @ file:///home/conda/feedstock_root/build_artifacts/torchmetrics_1742578969070/work
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1735661334605/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1742745135198/work
unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1736692496989/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1734859416348/work
xmltodict @ file:///home/conda/feedstock_root/build_artifacts/xmltodict_1732988210345/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work
zstandard==0.23.0
| name: openff-nagl
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _openmp_mutex=4.5=3_kmp_llvm
- annotated-types=0.7.0=pyhd8ed1ab_1
- brotli=1.1.0=hb9d3cd8_2
- brotli-bin=1.1.0=hb9d3cd8_2
- brotli-python=1.1.0=py310hf71b8c6_2
- bson=0.5.10=pyhd8ed1ab_0
- bzip2=1.0.8=h4bc722e_7
- ca-certificates=2025.1.31=hbcca054_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cachetools=5.5.2=pyhd8ed1ab_0
- cairo=1.18.4=h3394656_0
- certifi=2025.1.31=pyhd8ed1ab_0
- cffi=1.17.1=py310h8deb56e_0
- chardet=5.2.0=pyhd8ed1ab_3
- charset-normalizer=3.4.1=pyhd8ed1ab_0
- click=8.1.8=pyh707e725_0
- click-option-group=0.5.6=pyhd8ed1ab_0
- codecov=2.1.13=pyhd8ed1ab_1
- colorama=0.4.6=pyhd8ed1ab_1
- contourpy=1.3.1=py310h3788b33_0
- coverage=7.8.0=py310h89163eb_0
- cpython=3.10.16=py310hd8ed1ab_1
- cycler=0.12.1=pyhd8ed1ab_1
- cyrus-sasl=2.1.27=h54b06d7_7
- exceptiongroup=1.2.2=pyhd8ed1ab_1
- execnet=2.1.1=pyhd8ed1ab_1
- filelock=3.18.0=pyhd8ed1ab_0
- flexcache=0.3=pyhd8ed1ab_1
- flexparser=0.4=pyhd8ed1ab_1
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.15.0=h7e30c49_1
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fonttools=4.56.0=py310h89163eb_0
- freetype=2.13.3=h48d6fc4_0
- freetype-py=2.3.0=pyhd8ed1ab_0
- fsspec=2025.3.1=pyhd8ed1ab_0
- gmp=6.3.0=hac33072_2
- gmpy2=2.1.5=py310he8512ff_3
- greenlet=3.1.1=py310hf71b8c6_1
- h2=4.2.0=pyhd8ed1ab_0
- hpack=4.1.0=pyhd8ed1ab_0
- hyperframe=6.1.0=pyhd8ed1ab_0
- icu=75.1=he02047a_0
- idna=3.10=pyhd8ed1ab_1
- importlib-metadata=8.6.1=pyha770c72_0
- importlib_resources=6.5.2=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_1
- jinja2=3.1.6=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.7=py310h3788b33_0
- krb5=1.21.3=h659f571_0
- lcms2=2.17=h717163a_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- libabseil=20250127.1=cxx17_hbbce691_0
- libblas=3.9.0=31_hfdb39a5_mkl
- libboost=1.86.0=h6c02f8c_3
- libboost-python=1.86.0=py310ha2bacc8_3
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcblas=3.9.0=31_h372d94f_mkl
- libdeflate=1.23=h4ddbbb0_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgfortran=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.84.0=h2ff4ddf_0
- libhwloc=2.11.2=default_h0d58e46_1001
- libiconv=1.18=h4ce23a2_1
- libjpeg-turbo=3.0.0=hd590300_1
- liblapack=3.9.0=31_hc41d3b0_mkl
- liblzma=5.6.4=hb9d3cd8_0
- libnsl=2.0.1=hd590300_0
- libntlm=1.8=hb9d3cd8_0
- libpng=1.6.47=h943b412_0
- libpq=17.4=h27ae623_0
- libprotobuf=5.29.3=h501fc15_0
- librdkit=2024.09.6=h84b0b3c_0
- libsqlite=3.49.1=hee588c1_2
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.7.0=hd9ff511_3
- libtorch=2.6.0=cpu_mkl_hec71012_103
- libuuid=2.38.1=h0b41bf4_0
- libuv=1.50.0=hb9d3cd8_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.17.0=h8a09558_0
- libxcrypt=4.4.36=hd590300_1
- libxml2=2.13.7=h8d12d68_0
- libzlib=1.3.1=hb9d3cd8_2
- lightning-utilities=0.14.2=pyhd8ed1ab_0
- llvm-openmp=20.1.1=h024ca30_1
- markdown-it-py=3.0.0=pyhd8ed1ab_1
- markupsafe=3.0.2=py310h89163eb_1
- matplotlib-base=3.10.1=py310h68603db_0
- mdurl=0.1.2=pyhd8ed1ab_1
- mkl=2024.2.2=ha957f24_16
- mpc=1.3.1=h24ddda3_1
- mpfr=4.2.1=h90cbb55_3
- mpmath=1.3.0=pyhd8ed1ab_1
- munkres=1.1.4=pyh9f0ad1d_0
- ncurses=6.5=h2d0b736_3
- networkx=3.4.2=pyh267e887_2
- numpy=2.2.4=py310hefbff90_0
- openff-amber-ff-ports=0.0.4=pyhca7485f_0
- openff-forcefields=2024.09.0=pyhff2d567_0
- openff-toolkit-base=0.16.8=pyhd8ed1ab_2
- openff-units=0.3.0=pyhd8ed1ab_1
- openff-utilities=0.1.15=pyhd8ed1ab_0
- openjpeg=2.5.3=h5fbd93e_0
- openldap=2.6.9=he970967_0
- openssl=3.4.1=h7b32b05_0
- optree=0.14.1=py310h3788b33_1
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.2.3=py310h5eaa309_1
- pcre2=10.44=hba22ea6_2
- pillow=11.1.0=py310h7e6dc6c_0
- pint=0.24.4=pyhd8ed1ab_1
- pip=25.0.1=pyh8b19718_0
- pixman=0.44.2=h29eaf8c_0
- platformdirs=4.3.7=pyh29332c3_0
- pluggy=1.5.0=pyhd8ed1ab_1
- pthread-stubs=0.4=hb9d3cd8_1002
- pybind11=2.13.6=pyh1ec8472_2
- pybind11-global=2.13.6=pyh415d2e4_2
- pycairo=1.27.0=py310h25ff670_0
- pycparser=2.22=pyh29332c3_1
- pydantic=2.10.6=pyh3cfb1c2_0
- pydantic-core=2.27.2=py310h505e2c1_0
- pygments=2.19.1=pyhd8ed1ab_0
- pyparsing=3.2.3=pyhd8ed1ab_1
- pysocks=1.7.1=pyha55dd90_7
- pytest=8.3.5=pyhd8ed1ab_0
- pytest-cov=6.0.0=pyhd8ed1ab_1
- pytest-xdist=3.6.1=pyhd8ed1ab_1
- python=3.10.16=he725a3c_1_cpython
- python-constraint=1.4.0=pyhff2d567_1
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.10=5_cp310
- pytorch=2.6.0=cpu_mkl_py310_h90decc8_103
- pytorch-lightning=2.5.1=pyh506cb10_0
- pytz=2024.1=pyhd8ed1ab_0
- pyyaml=6.0.2=py310h89163eb_2
- qhull=2020.2=h434a139_5
- rdkit=2024.09.6=py310hcd13295_0
- readline=8.2=h8c095d6_2
- reportlab=4.3.1=py310ha75aee5_0
- requests=2.32.3=pyhd8ed1ab_1
- rich=14.0.0=pyh29332c3_0
- rlpycairo=0.2.0=pyhd8ed1ab_0
- scipy=1.15.2=py310h1d65ade_0
- setuptools=75.8.2=pyhff2d567_0
- six=1.17.0=pyhd8ed1ab_0
- sleef=3.8=h1b44611_0
- smirnoff99frosst=1.1.0=pyh44b312d_0
- sqlalchemy=2.0.40=py310ha75aee5_0
- sympy=1.13.3=pyh2585a3b_105
- tbb=2021.13.0=hceb3a55_1
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_1
- tomli=2.2.1=pyhd8ed1ab_1
- torchmetrics=1.7.0=pyhd8ed1ab_0
- tqdm=4.67.1=pyhd8ed1ab_1
- typing-extensions=4.13.0=h9fa5a19_1
- typing_extensions=4.13.0=pyh29332c3_1
- tzdata=2025b=h78e105d_0
- unicodedata2=16.0.0=py310ha75aee5_0
- urllib3=2.3.0=pyhd8ed1ab_0
- wheel=0.45.1=pyhd8ed1ab_1
- xmltodict=0.14.2=pyhd8ed1ab_1
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.12=h4f16b4b_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xorg-libxext=1.3.6=hb9d3cd8_0
- xorg-libxrender=0.9.12=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- zipp=3.21.0=pyhd8ed1ab_1
- zstandard=0.23.0=py310ha75aee5_1
- zstd=1.5.7=hb8e6e7a_2
prefix: /opt/conda/envs/openff-nagl
| [
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_lookup_long"
] | [
"openff/nagl/tests/nn/test_model.py::TestGNNModel::test_load",
"openff/nagl/tests/nn/test_model.py::TestGNNModel::test_load_model_with_kwargs",
"openff/nagl/tests/nn/test_model.py::TestGNNModel::test_protein_computable"
] | [
"openff/nagl/tests/nn/test_model.py::TestBaseGNNModel::test_init",
"openff/nagl/tests/nn/test_model.py::TestGNNModel::test_init",
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_validate_property_lookup_table_conversion_from_list",
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_creation_with_wrong_key",
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_lookup",
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_lookup_failure",
"openff/nagl/tests/test_lookups.py::TestAtomPropertiesLookupTable::test_lookup_with_different_connectivity"
] | [] | MIT License | 20,500 | 209 | [
"openff/nagl/lookups.py"
] |
|
sanic-org__sanic-3015 | 665234da121ed74e9155c3c455afa3ee2c71fb58 | 2024-12-16 16:38:27 | 4327b8b27eeccb8067788ed9f098c05b83b1109d | diff --git a/sanic/worker/manager.py b/sanic/worker/manager.py
index 4c9487bc..02d11848 100644
--- a/sanic/worker/manager.py
+++ b/sanic/worker/manager.py
@@ -372,7 +372,10 @@ class WorkerManager:
for process in self.processes:
logger.info("Killing %s [%s]", process.name, process.pid)
with suppress(ProcessLookupError):
- os.kill(process.pid, SIGKILL)
+ try:
+ os.killpg(os.getpgid(process.pid), SIGKILL)
+ except OSError:
+ os.kill(process.pid, SIGKILL)
raise ServerKilled
def shutdown_signal(self, signal, frame):
@@ -381,6 +384,7 @@ class WorkerManager:
logger.info("Shutdown interrupted. Killing.")
with suppress(ServerKilled):
self.kill()
+ return
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
self.monitor_publisher.send(None)
| Cannot use Ctrl + C to exit the program normally
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
Use Ctrl + C to interrupt the program when the program startup is not complete, and subsequent Ctrl + C will fail.
The operation procedure and related logs are as follows:
1. Use Ctrl + C for the first time
[2024-07-16 13:04:06 +0800] [19752] [INFO] Received signal SIGINT. Shutting down.
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py", line 314, in _bootstrap
self.run()
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sanic/worker/serve.py", line 117, in worker_serve
return _serve_http_1(
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sanic/server/runners.py", line 257, in _serve_http_1
loop.run_until_complete(app._server_event("init", "before"))
File "uvloop/loop.pyx", line 1511, in uvloop.loop.Loop.run_until_complete
File "uvloop/loop.pyx", line 1504, in uvloop.loop.Loop.run_until_complete
File "uvloop/loop.pyx", line 1377, in uvloop.loop.Loop.run_forever
File "uvloop/loop.pyx", line 555, in uvloop.loop.Loop._run
File "uvloop/handles/poll.pyx", line 216, in uvloop.loop.__on_uvpoll_event
File "uvloop/cbhandles.pyx", line 83, in uvloop.loop.Handle._run
File "uvloop/cbhandles.pyx", line 66, in uvloop.loop.Handle._run
File "uvloop/loop.pyx", line 397, in uvloop.loop.Loop._read_from_self
File "uvloop/loop.pyx", line 402, in uvloop.loop.Loop._invoke_signals
File "uvloop/loop.pyx", line 377, in uvloop.loop.Loop._ceval_process_signals
KeyboardInterrupt
2. Use Ctrl + C for the second or more time
[2024-07-16 13:04:08 +0800] [19752] [INFO] Shutdown interrupted. Killing.
[2024-07-16 13:04:08 +0800] [19752] [INFO] Killing Sanic-Server-0-0 [19757]
[2024-07-16 13:04:08 +0800] [19752] [INFO] Received signal SIGINT. Shutting down.
### Code snippet
```python
import asyncio
from sanic.app import Sanic
app = Sanic(name='test_sanic')
@app.listener('before_server_start')
async def before_server_start(app, loop):
await asyncio.sleep(5)
```
### Expected Behavior
_No response_
### How do you run Sanic?
Sanic CLI
### Operating System
MacOS
### Sanic Version
23.12.2
### Additional context
_No response_ | sanic-org/sanic | diff --git a/tests/test_reloader.py b/tests/test_reloader.py
index 8df46f64..3c478dab 100644
--- a/tests/test_reloader.py
+++ b/tests/test_reloader.py
@@ -1,243 +1,246 @@
-import os
-import secrets
-import sys
-
-from contextlib import suppress
-from subprocess import PIPE, Popen, TimeoutExpired
-from tempfile import TemporaryDirectory
-from textwrap import dedent
-from threading import Timer
-from time import sleep
+# 2024-12-22 AMH - Reloader tests have not been working for a while.
+# We need to re-implement them.
-import pytest
+# import os
+# import secrets
+# import sys
+# from contextlib import suppress
+# from subprocess import PIPE, Popen, TimeoutExpired
+# from tempfile import TemporaryDirectory
+# from textwrap import dedent
+# from threading import Timer
+# from time import sleep
-# We need to interrupt the autoreloader without killing it,
-# so that the server gets terminated
-# https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
+# import pytest
-try:
- from signal import CTRL_BREAK_EVENT
- from subprocess import CREATE_NEW_PROCESS_GROUP
- flags = CREATE_NEW_PROCESS_GROUP
-except ImportError:
- flags = 0
+# # We need to interrupt the autoreloader without killing it,
+# # so that the server gets terminated
+# # https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
-TIMER_DELAY = 2
+# try:
+# from signal import CTRL_BREAK_EVENT
+# from subprocess import CREATE_NEW_PROCESS_GROUP
+# flags = CREATE_NEW_PROCESS_GROUP
+# except ImportError:
+# flags = 0
-def terminate(proc):
- if flags:
- proc.send_signal(CTRL_BREAK_EVENT)
- else:
- proc.terminate()
+# TIMER_DELAY = 2
-def write_app(filename, **runargs):
- text = secrets.token_urlsafe()
- with open(filename, "w") as f:
- f.write(
- dedent(
- f"""\
- import os
- from sanic import Sanic
-
- app = Sanic(__name__)
+# def terminate(proc):
+# if flags:
+# proc.send_signal(CTRL_BREAK_EVENT)
+# else:
+# proc.terminate()
- app.route("/")(lambda x: x)
-
- @app.listener("after_server_start")
- def complete(*args):
- print("complete", os.getpid(), {text!r})
-
- if __name__ == "__main__":
- app.run(**{runargs!r})
- """
- )
- )
- return text
-
-
-def write_listener_app(filename, **runargs):
- start_text = secrets.token_urlsafe()
- stop_text = secrets.token_urlsafe()
- with open(filename, "w") as f:
- f.write(
- dedent(
- f"""\
- import os
- from sanic import Sanic
-
- app = Sanic(__name__)
-
- app.route("/")(lambda x: x)
-
- @app.reload_process_start
- async def reload_start(*_):
- print("reload_start", os.getpid(), {start_text!r})
-
- @app.reload_process_stop
- async def reload_stop(*_):
- print("reload_stop", os.getpid(), {stop_text!r})
-
- if __name__ == "__main__":
- app.run(**{runargs!r})
- """
- )
- )
- return start_text, stop_text
-
-
-def write_json_config_app(filename, jsonfile, **runargs):
- with open(filename, "w") as f:
- f.write(
- dedent(
- f"""\
- import os
- from sanic import Sanic
- import json
-
- app = Sanic(__name__)
- with open("{jsonfile}", "r") as f:
- config = json.load(f)
- app.config.update_config(config)
-
- app.route("/")(lambda x: x)
-
- @app.listener("after_server_start")
- def complete(*args):
- print("complete", os.getpid(), app.config.FOO)
-
- if __name__ == "__main__":
- app.run(**{runargs!r})
- """
- )
- )
-
-
-def write_file(filename):
- text = secrets.token_urlsafe()
- with open(filename, "w") as f:
- f.write(f"""{{"FOO": "{text}"}}""")
- return text
-
-
-def scanner(proc, trigger="complete"):
- for line in proc.stdout:
- line = line.decode().strip()
- if line.startswith(trigger):
- yield line
-
-
-argv = dict(
- script=[sys.executable, "reloader.py"],
- module=[sys.executable, "-m", "reloader"],
- sanic=[
- sys.executable,
- "-m",
- "sanic",
- "--port",
- "42204",
- "--auto-reload",
- "reloader.app",
- ],
-)
-
-
[email protected](
- "runargs, mode",
- [
- (dict(port=42202, auto_reload=True), "script"),
- (dict(port=42203, auto_reload=True), "module"),
- ({}, "sanic"),
- ],
-)
[email protected]
-async def test_reloader_live(runargs, mode):
- with TemporaryDirectory() as tmpdir:
- filename = os.path.join(tmpdir, "reloader.py")
- text = write_app(filename, **runargs)
- command = argv[mode]
- proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
- try:
- timeout = Timer(TIMER_DELAY, terminate, [proc])
- timeout.start()
- # Python apparently keeps using the old source sometimes if
- # we don't sleep before rewrite (pycache timestamp problem?)
- sleep(1)
- line = scanner(proc)
- assert text in next(line)
- # Edit source code and try again
- text = write_app(filename, **runargs)
- assert text in next(line)
- finally:
- timeout.cancel()
- terminate(proc)
- with suppress(TimeoutExpired):
- proc.wait(timeout=3)
-
-
[email protected](
- "runargs, mode",
- [
- (dict(port=42302, auto_reload=True), "script"),
- (dict(port=42303, auto_reload=True), "module"),
- ({}, "sanic"),
- ],
-)
[email protected]
-async def test_reloader_live_with_dir(runargs, mode):
- with TemporaryDirectory() as tmpdir:
- filename = os.path.join(tmpdir, "reloader.py")
- config_file = os.path.join(tmpdir, "config.json")
- runargs["reload_dir"] = tmpdir
- write_json_config_app(filename, config_file, **runargs)
- text = write_file(config_file)
- command = argv[mode]
- if mode == "sanic":
- command += ["--reload-dir", tmpdir]
- proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
- try:
- timeout = Timer(TIMER_DELAY, terminate, [proc])
- timeout.start()
- # Python apparently keeps using the old source sometimes if
- # we don't sleep before rewrite (pycache timestamp problem?)
- sleep(1)
- line = scanner(proc)
- assert text in next(line)
- # Edit source code and try again
- text = write_file(config_file)
- assert text in next(line)
- finally:
- timeout.cancel()
- terminate(proc)
- with suppress(TimeoutExpired):
- proc.wait(timeout=3)
-
-
-def test_reload_listeners():
- with TemporaryDirectory() as tmpdir:
- filename = os.path.join(tmpdir, "reloader.py")
- start_text, stop_text = write_listener_app(
- filename, port=42305, auto_reload=True
- )
-
- proc = Popen(
- argv["script"], cwd=tmpdir, stdout=PIPE, creationflags=flags
- )
- try:
- timeout = Timer(TIMER_DELAY, terminate, [proc])
- timeout.start()
- # Python apparently keeps using the old source sometimes if
- # we don't sleep before rewrite (pycache timestamp problem?)
- sleep(1)
- line = scanner(proc, "reload_start")
- assert start_text in next(line)
- line = scanner(proc, "reload_stop")
- assert stop_text in next(line)
- finally:
- timeout.cancel()
- terminate(proc)
- with suppress(TimeoutExpired):
- proc.wait(timeout=3)
+
+# def write_app(filename, **runargs):
+# text = secrets.token_urlsafe()
+# with open(filename, "w") as f:
+# f.write(
+# dedent(
+# f"""\
+# import os
+# from sanic import Sanic
+
+# app = Sanic(__name__)
+
+# app.route("/")(lambda x: x)
+
+# @app.listener("after_server_start")
+# def complete(*args):
+# print("complete", os.getpid(), {text!r})
+
+# if __name__ == "__main__":
+# app.run(**{runargs!r})
+# """
+# )
+# )
+# return text
+
+
+# def write_listener_app(filename, **runargs):
+# start_text = secrets.token_urlsafe()
+# stop_text = secrets.token_urlsafe()
+# with open(filename, "w") as f:
+# f.write(
+# dedent(
+# f"""\
+# import os
+# from sanic import Sanic
+
+# app = Sanic(__name__)
+
+# app.route("/")(lambda x: x)
+
+# @app.reload_process_start
+# async def reload_start(*_):
+# print("reload_start", os.getpid(), {start_text!r})
+
+# @app.reload_process_stop
+# async def reload_stop(*_):
+# print("reload_stop", os.getpid(), {stop_text!r})
+
+# if __name__ == "__main__":
+# app.run(**{runargs!r})
+# """
+# )
+# )
+# return start_text, stop_text
+
+
+# def write_json_config_app(filename, jsonfile, **runargs):
+# with open(filename, "w") as f:
+# f.write(
+# dedent(
+# f"""\
+# import os
+# from sanic import Sanic
+# import json
+
+# app = Sanic(__name__)
+# with open("{jsonfile}", "r") as f:
+# config = json.load(f)
+# app.config.update_config(config)
+
+# app.route("/")(lambda x: x)
+
+# @app.listener("after_server_start")
+# def complete(*args):
+# print("complete", os.getpid(), app.config.FOO)
+
+# if __name__ == "__main__":
+# app.run(**{runargs!r})
+# """
+# )
+# )
+
+
+# def write_file(filename):
+# text = secrets.token_urlsafe()
+# with open(filename, "w") as f:
+# f.write(f"""{{"FOO": "{text}"}}""")
+# return text
+
+
+# def scanner(proc, trigger="complete"):
+# for line in proc.stdout:
+# line = line.decode().strip()
+# if line.startswith(trigger):
+# yield line
+
+
+# argv = dict(
+# script=[sys.executable, "reloader.py"],
+# module=[sys.executable, "-m", "reloader"],
+# sanic=[
+# sys.executable,
+# "-m",
+# "sanic",
+# "--port",
+# "42204",
+# "--auto-reload",
+# "reloader.app",
+# ],
+# )
+
+
+# @pytest.mark.parametrize(
+# "runargs, mode",
+# [
+# (dict(port=42202, auto_reload=True), "script"),
+# (dict(port=42203, auto_reload=True), "module"),
+# ({}, "sanic"),
+# ],
+# )
+# @pytest.mark.xfail
+# async def test_reloader_live(runargs, mode):
+# with TemporaryDirectory() as tmpdir:
+# filename = os.path.join(tmpdir, "reloader.py")
+# text = write_app(filename, **runargs)
+# command = argv[mode]
+# proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
+# try:
+# timeout = Timer(TIMER_DELAY, terminate, [proc])
+# timeout.start()
+# # Python apparently keeps using the old source sometimes if
+# # we don't sleep before rewrite (pycache timestamp problem?)
+# sleep(1)
+# line = scanner(proc)
+# assert text in next(line)
+# # Edit source code and try again
+# text = write_app(filename, **runargs)
+# assert text in next(line)
+# finally:
+# timeout.cancel()
+# terminate(proc)
+# with suppress(TimeoutExpired):
+# proc.wait(timeout=3)
+
+
+# @pytest.mark.parametrize(
+# "runargs, mode",
+# [
+# (dict(port=42302, auto_reload=True), "script"),
+# (dict(port=42303, auto_reload=True), "module"),
+# ({}, "sanic"),
+# ],
+# )
+# @pytest.mark.xfail
+# async def test_reloader_live_with_dir(runargs, mode):
+# with TemporaryDirectory() as tmpdir:
+# filename = os.path.join(tmpdir, "reloader.py")
+# config_file = os.path.join(tmpdir, "config.json")
+# runargs["reload_dir"] = tmpdir
+# write_json_config_app(filename, config_file, **runargs)
+# text = write_file(config_file)
+# command = argv[mode]
+# if mode == "sanic":
+# command += ["--reload-dir", tmpdir]
+# proc = Popen(command, cwd=tmpdir, stdout=PIPE, creationflags=flags)
+# try:
+# timeout = Timer(TIMER_DELAY, terminate, [proc])
+# timeout.start()
+# # Python apparently keeps using the old source sometimes if
+# # we don't sleep before rewrite (pycache timestamp problem?)
+# sleep(1)
+# line = scanner(proc)
+# assert text in next(line)
+# # Edit source code and try again
+# text = write_file(config_file)
+# assert text in next(line)
+# finally:
+# timeout.cancel()
+# terminate(proc)
+# with suppress(TimeoutExpired):
+# proc.wait(timeout=3)
+
+
+# def test_reload_listeners():
+# with TemporaryDirectory() as tmpdir:
+# filename = os.path.join(tmpdir, "reloader.py")
+# start_text, stop_text = write_listener_app(
+# filename, port=42305, auto_reload=True
+# )
+
+# proc = Popen(
+# argv["script"], cwd=tmpdir, stdout=PIPE, creationflags=flags
+# )
+# try:
+# timeout = Timer(TIMER_DELAY, terminate, [proc])
+# timeout.start()
+# # Python apparently keeps using the old source sometimes if
+# # we don't sleep before rewrite (pycache timestamp problem?)
+# sleep(1)
+# line = scanner(proc, "reload_start")
+# assert start_text in next(line)
+# line = scanner(proc, "reload_stop")
+# assert stop_text in next(line)
+# finally:
+# timeout.cancel()
+# terminate(proc)
+# with suppress(TimeoutExpired):
+# proc.wait(timeout=3)
diff --git a/tests/test_requests.py b/tests/test_requests.py
index 28a211db..78fc488a 100644
--- a/tests/test_requests.py
+++ b/tests/test_requests.py
@@ -2244,18 +2244,26 @@ def test_conflicting_body_methods_overload(app: Sanic):
@app.put("/p/<foo>", name="three")
async def put(request, foo=None):
return json(
- {"name": request.route.name, "body": str(request.body), "foo": foo}
+ {
+ "name": request.route.name,
+ "body": str(request.body).replace(" ", ""),
+ "foo": foo,
+ }
)
@app.delete("/p/<foo>")
async def delete(request, foo):
return json(
- {"name": request.route.name, "body": str(request.body), "foo": foo}
+ {
+ "name": request.route.name,
+ "body": str(request.body).replace(" ", ""),
+ "foo": foo,
+ }
)
dumps = BaseHTTPResponse._dumps
payload = {"test": "OK"}
- data = str(dumps(payload).encode())
+ data = str(dumps(payload).encode()).replace(" ", "")
_, response = app.test_client.put("/", json=payload)
assert response.status == 200
diff --git a/tests/worker/test_manager.py b/tests/worker/test_manager.py
index 53180d6c..d75e6c80 100644
--- a/tests/worker/test_manager.py
+++ b/tests/worker/test_manager.py
@@ -66,10 +66,12 @@ def test_kill(os_mock: Mock):
process.pid = 1234
context = Mock()
context.Process.return_value = process
+ os_mock.getpgid.return_value = 5678
manager = WorkerManager(1, fake_serve, {}, context, (Mock(), Mock()), {})
with pytest.raises(ServerKilled):
manager.kill()
- os_mock.kill.assert_called_once_with(1234, SIGKILL)
+ os_mock.getpgid.assert_called_once_with(1234)
+ os_mock.killpg.assert_called_once_with(5678, SIGKILL)
@patch("sanic.worker.process.os")
@@ -81,13 +83,15 @@ def test_shutdown_signal_send_kill(
process.pid = 1234
context = Mock()
context.Process.return_value = process
+ manager_os_mock.getpgid.return_value = 5678
manager = WorkerManager(1, fake_serve, {}, context, (Mock(), Mock()), {})
assert manager._shutting_down is False
manager.shutdown_signal(SIGINT, None)
assert manager._shutting_down is True
process_os_mock.kill.assert_called_once_with(1234, SIGINT)
manager.shutdown_signal(SIGINT, None)
- manager_os_mock.kill.assert_called_once_with(1234, SIGKILL)
+ manager_os_mock.getpgid.assert_called_once_with(1234)
+ manager_os_mock.killpg.assert_called_once_with(5678, SIGKILL)
def test_restart_all():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 24.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiofiles==24.1.0
anyio==4.9.0
async-generator==1.10
bandit==1.8.3
beautifulsoup4==4.13.3
certifi==2025.1.31
cffi==1.17.1
chardet==3.0.4
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
h11==0.14.0
html5tagger==1.3.0
httpcore==1.0.7
httptools==0.6.4
httpx==0.28.1
idna==3.10
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
multidict==6.2.0
mypy==1.15.0
mypy-extensions==1.0.0
packaging==24.2
pbr==6.1.1
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
py-cpuinfo==9.0.0
pycparser==2.22
Pygments==2.19.1
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-sanic==1.9.1
PyYAML==6.0.2
rich==14.0.0
ruff==0.11.2
-e git+https://github.com/sanic-org/sanic.git@665234da121ed74e9155c3c455afa3ee2c71fb58#egg=sanic
sanic-routing==23.12.0
sanic-testing==24.6.0
six==1.17.0
slotscheck==0.19.1
sniffio==1.3.1
soupsieve==2.6
stevedore==5.4.1
tomli==2.2.1
towncrier==24.8.0
tox==3.28.0
tracerite==1.1.1
types-ujson==5.10.0.20250326
typing_extensions==4.13.0
ujson==5.10.0
uvicorn==0.34.0
uvloop==0.21.0
virtualenv==20.29.3
websockets==10.4
zipp==3.21.0
| name: sanic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiofiles==24.1.0
- anyio==4.9.0
- async-generator==1.10
- bandit==1.8.3
- beautifulsoup4==4.13.3
- certifi==2025.1.31
- cffi==1.17.1
- chardet==3.0.4
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- h11==0.14.0
- html5tagger==1.3.0
- httpcore==1.0.7
- httptools==0.6.4
- httpx==0.28.1
- idna==3.10
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mdurl==0.1.2
- multidict==6.2.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- packaging==24.2
- pbr==6.1.1
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- py-cpuinfo==9.0.0
- pycparser==2.22
- pygments==2.19.1
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-sanic==1.9.1
- pyyaml==6.0.2
- rich==14.0.0
- ruff==0.11.2
- sanic==24.6.0
- sanic-routing==23.12.0
- sanic-testing==24.6.0
- six==1.17.0
- slotscheck==0.19.1
- sniffio==1.3.1
- soupsieve==2.6
- stevedore==5.4.1
- tomli==2.2.1
- towncrier==24.8.0
- tox==3.28.0
- tracerite==1.1.1
- types-ujson==5.10.0.20250326
- typing-extensions==4.13.0
- ujson==5.10.0
- uvicorn==0.34.0
- uvloop==0.21.0
- virtualenv==20.29.3
- websockets==10.4
- zipp==3.21.0
prefix: /opt/conda/envs/sanic
| [
"tests/worker/test_manager.py::test_kill",
"tests/worker/test_manager.py::test_shutdown_signal_send_kill"
] | [] | [
"tests/test_requests.py::test_sync",
"tests/test_requests.py::test_sync_asgi",
"tests/test_requests.py::test_ip",
"tests/test_requests.py::test_url_asgi",
"tests/test_requests.py::test_text",
"tests/test_requests.py::test_html",
"tests/test_requests.py::test_text_asgi",
"tests/test_requests.py::test_headers",
"tests/test_requests.py::test_headers_asgi",
"tests/test_requests.py::test_non_str_headers",
"tests/test_requests.py::test_non_str_headers_asgi",
"tests/test_requests.py::test_invalid_response",
"tests/test_requests.py::test_invalid_response_asgi",
"tests/test_requests.py::test_json",
"tests/test_requests.py::test_json_asgi",
"tests/test_requests.py::test_empty_json",
"tests/test_requests.py::test_empty_json_asgi",
"tests/test_requests.py::test_echo_json",
"tests/test_requests.py::test_echo_json_asgi",
"tests/test_requests.py::test_invalid_json",
"tests/test_requests.py::test_invalid_json_asgi",
"tests/test_requests.py::test_query_string",
"tests/test_requests.py::test_popped_stays_popped",
"tests/test_requests.py::test_query_string_asgi",
"tests/test_requests.py::test_uri_template",
"tests/test_requests.py::test_uri_template_asgi",
"tests/test_requests.py::test_token[None-a1d895e0-553a-421a-8e22-5ff8ecb48cbf]",
"tests/test_requests.py::test_token[Token-a1d895e0-553a-421a-8e22-5ff8ecb48cbf]",
"tests/test_requests.py::test_token[Bearer-a1d895e0-553a-421a-8e22-5ff8ecb48cbf]",
"tests/test_requests.py::test_token[None-None]",
"tests/test_requests.py::test_credentials[None-a1d895e0-553a-421a-8e22-5ff8ecb48cbf-None-None]",
"tests/test_requests.py::test_credentials[Token-a1d895e0-553a-421a-8e22-5ff8ecb48cbf-None-None]",
"tests/test_requests.py::test_credentials[Bearer-a1d895e0-553a-421a-8e22-5ff8ecb48cbf-None-None]",
"tests/test_requests.py::test_credentials[Basic-c29tZV91c2VybmFtZTpzb21lX3Bhc3M=-some_username-some_pass]",
"tests/test_requests.py::test_credentials[None-None-None-None]",
"tests/test_requests.py::test_content_type",
"tests/test_requests.py::test_content_type_asgi",
"tests/test_requests.py::test_standard_forwarded",
"tests/test_requests.py::test_standard_forwarded_asgi",
"tests/test_requests.py::test_remote_addr_with_two_proxies",
"tests/test_requests.py::test_remote_addr_with_two_proxies_asgi",
"tests/test_requests.py::test_remote_addr_without_proxy",
"tests/test_requests.py::test_remote_addr_without_proxy_asgi",
"tests/test_requests.py::test_remote_addr_custom_headers",
"tests/test_requests.py::test_remote_addr_custom_headers_asgi",
"tests/test_requests.py::test_forwarded_scheme",
"tests/test_requests.py::test_match_info",
"tests/test_requests.py::test_match_info_asgi",
"tests/test_requests.py::test_post_json",
"tests/test_requests.py::test_post_json_asgi",
"tests/test_requests.py::test_post_form_urlencoded",
"tests/test_requests.py::test_post_form_urlencoded_asgi",
"tests/test_requests.py::test_post_form_urlencoded_keep_blanks",
"tests/test_requests.py::test_post_form_urlencoded_keep_blanks_asgi",
"tests/test_requests.py::test_post_form_urlencoded_drop_blanks",
"tests/test_requests.py::test_post_form_urlencoded_drop_blanks_asgi",
"tests/test_requests.py::test_post_form_multipart_form_data[------sanic\\r\\nContent-Disposition:",
"tests/test_requests.py::test_post_form_multipart_form_data[------sanic\\r\\ncontent-disposition:",
"tests/test_requests.py::test_post_form_multipart_form_data_asgi[------sanic\\r\\nContent-Disposition:",
"tests/test_requests.py::test_post_form_multipart_form_data_asgi[------sanic\\r\\ncontent-disposition:",
"tests/test_requests.py::test_url_attributes_no_ssl[/foo--http://{}:{}/foo]",
"tests/test_requests.py::test_url_attributes_no_ssl[/bar/baz--http://{}:{}/bar/baz]",
"tests/test_requests.py::test_url_attributes_no_ssl[/moo/boo-arg1=val1-http://{}:{}/moo/boo?arg1=val1]",
"tests/test_requests.py::test_url_attributes_no_ssl_asgi[/foo--{}/foo]",
"tests/test_requests.py::test_url_attributes_no_ssl_asgi[/bar/baz--{}/bar/baz]",
"tests/test_requests.py::test_url_attributes_no_ssl_asgi[/moo/boo-arg1=val1-{}/moo/boo?arg1=val1]",
"tests/test_requests.py::test_form_with_multiple_values",
"tests/test_requests.py::test_form_with_multiple_values_asgi",
"tests/test_requests.py::test_request_string_representation",
"tests/test_requests.py::test_request_string_representation_asgi",
"tests/test_requests.py::test_request_multipart_files[------sanic\\r\\nContent-Disposition:",
"tests/test_requests.py::test_request_multipart_files[------sanic\\r\\ncontent-disposition:",
"tests/test_requests.py::test_request_multipart_files_asgi[------sanic\\r\\nContent-Disposition:",
"tests/test_requests.py::test_request_multipart_files_asgi[------sanic\\r\\ncontent-disposition:",
"tests/test_requests.py::test_request_multipart_file_with_json_content_type",
"tests/test_requests.py::test_request_multipart_file_with_json_content_type_asgi",
"tests/test_requests.py::test_request_multipart_file_without_field_name",
"tests/test_requests.py::test_request_multipart_file_duplicate_filed_name",
"tests/test_requests.py::test_request_multipart_file_duplicate_filed_name_asgi",
"tests/test_requests.py::test_request_multipart_with_multiple_files_and_type",
"tests/test_requests.py::test_request_multipart_with_multiple_files_and_type_asgi",
"tests/test_requests.py::test_request_repr",
"tests/test_requests.py::test_request_repr_asgi",
"tests/test_requests.py::test_request_bool",
"tests/test_requests.py::test_request_parsing_form_failed",
"tests/test_requests.py::test_request_parsing_form_failed_asgi",
"tests/test_requests.py::test_request_args_no_query_string",
"tests/test_requests.py::test_request_args_no_query_string_await",
"tests/test_requests.py::test_request_query_args",
"tests/test_requests.py::test_request_query_args_asgi",
"tests/test_requests.py::test_request_query_args_custom_parsing",
"tests/test_requests.py::test_request_query_args_custom_parsing_asgi",
"tests/test_requests.py::test_request_cookies",
"tests/test_requests.py::test_request_cookies_asgi",
"tests/test_requests.py::test_request_cookies_without_cookies",
"tests/test_requests.py::test_request_cookies_without_cookies_asgi",
"tests/test_requests.py::test_request_port",
"tests/test_requests.py::test_request_port_asgi",
"tests/test_requests.py::test_request_socket",
"tests/test_requests.py::test_request_server_name",
"tests/test_requests.py::test_request_server_name_in_host_header",
"tests/test_requests.py::test_request_server_name_forwarded",
"tests/test_requests.py::test_request_server_port",
"tests/test_requests.py::test_request_server_port_in_host_header",
"tests/test_requests.py::test_request_server_port_forwarded",
"tests/test_requests.py::test_request_form_invalid_content_type",
"tests/test_requests.py::test_server_name_and_url_for",
"tests/test_requests.py::test_url_for_with_forwarded_request",
"tests/test_requests.py::test_request_form_invalid_content_type_asgi",
"tests/test_requests.py::test_endpoint_basic",
"tests/test_requests.py::test_endpoint_basic_asgi",
"tests/test_requests.py::test_endpoint_named_app",
"tests/test_requests.py::test_endpoint_named_app_asgi",
"tests/test_requests.py::test_endpoint_blueprint",
"tests/test_requests.py::test_endpoint_blueprint_asgi",
"tests/test_requests.py::test_url_for_without_server_name",
"tests/test_requests.py::test_safe_method_with_body_ignored",
"tests/test_requests.py::test_safe_method_with_body",
"tests/test_requests.py::test_conflicting_body_methods_overload_error",
"tests/test_requests.py::test_conflicting_body_methods_overload",
"tests/test_requests.py::test_handler_overload_error",
"tests/test_requests.py::test_handler_overload",
"tests/worker/test_manager.py::test_manager_no_workers",
"tests/worker/test_manager.py::test_terminate",
"tests/worker/test_manager.py::test_shutown",
"tests/worker/test_manager.py::test_restart_all",
"tests/worker/test_manager.py::test_monitor_all[False]",
"tests/worker/test_manager.py::test_monitor_all[True]",
"tests/worker/test_manager.py::test_monitor_all_with_files[False]",
"tests/worker/test_manager.py::test_monitor_all_with_files[True]",
"tests/worker/test_manager.py::test_monitor_one_process[False]",
"tests/worker/test_manager.py::test_monitor_one_process[True]",
"tests/worker/test_manager.py::test_shutdown_signal",
"tests/worker/test_manager.py::test_shutdown_servers",
"tests/worker/test_manager.py::test_shutdown_servers_named",
"tests/worker/test_manager.py::test_scale",
"tests/worker/test_manager.py::test_manage_basic",
"tests/worker/test_manager.py::test_manage_transient",
"tests/worker/test_manager.py::test_manage_restartable",
"tests/worker/test_manager.py::test_manage_untracked",
"tests/worker/test_manager.py::test_manage_duplicate_ident",
"tests/worker/test_manager.py::test_transient_not_restartable",
"tests/worker/test_manager.py::test_remove_worker",
"tests/worker/test_manager.py::test_remove_untracked_worker"
] | [] | MIT License | 20,503 | 247 | [
"sanic/worker/manager.py"
] |
|
canonical__charmcraft-2042 | 0977fbf449d46accb75838b11db8590eb6f2b8de | 2024-12-16 17:07:44 | af377cd8592c90a4df7017712b93468a0b8041bf | diff --git a/charmcraft/application/commands/store.py b/charmcraft/application/commands/store.py
index 8515c306..80c7c1d4 100644
--- a/charmcraft/application/commands/store.py
+++ b/charmcraft/application/commands/store.py
@@ -1539,6 +1539,7 @@ class PublishLibCommand(CharmcraftCommand):
analysis.append((lib_data, error_message))
# work on the analysis result, showing messages to the user if not programmatic output
+ return_code = 0
for lib_data, error_message in analysis:
if error_message is None:
store.create_library_revision(
@@ -1555,6 +1556,7 @@ class PublishLibCommand(CharmcraftCommand):
)
else:
message = error_message
+ return_code = 1
if not parsed_args.format:
emit.message(message)
@@ -1577,6 +1579,8 @@ class PublishLibCommand(CharmcraftCommand):
output_data.append(datum)
emit.message(cli.format_content(output_data, parsed_args.format))
+ return return_code
+
class FetchLibCommand(CharmcraftCommand):
"""Fetch one or more charm libraries."""
| `charmcraft publish-lib` exits with 0 even if publishing fails
### Bug Description
Currently `charmcraft publish-lib` correctly fails the library push if something's wrong with LIBPATCH (e.g., LIBPATCH is not bumped, going back, etc). However, the exit of the command is always `0`.
This is a problem especially in CI, where a "publish libraries" step will never fail (because of the 0 exit code), and thus is undetectable.
### To Reproduce
1. Take a charm library of any charm you own
2. Modify the `LIBPATCH` to `LIBPATCH-1`
3. Run `charmcraft publish-lib` on that library
4. Read the failure message and check the exit code (it will be 0)
### Environment
charmcraft 3.2.2.post72+g3a094837 (3.x/edge) (but you can reproduce this with any version)
### charmcraft.yaml
```yaml
-
```
### Relevant log output
```shell
-
```
| canonical/charmcraft | diff --git a/tests/unit/commands/test_store.py b/tests/unit/commands/test_store.py
index 9d68d4bf..512f5cbf 100644
--- a/tests/unit/commands/test_store.py
+++ b/tests/unit/commands/test_store.py
@@ -30,9 +30,16 @@ from craft_store import models
from charmcraft import errors, store
from charmcraft.application import commands
from charmcraft.application.commands import SetResourceArchitecturesCommand
-from charmcraft.application.commands.store import FetchLibs, LoginCommand
+from charmcraft.application.commands import store as store_commands
+from charmcraft.application.commands.store import (
+ FetchLibs,
+ LoginCommand,
+ PublishLibCommand,
+)
from charmcraft.application.main import APP_METADATA
from charmcraft.models.project import CharmLib
+from charmcraft.services import CharmcraftServiceFactory
+from charmcraft.store.models import Library
from charmcraft.utils import cli
from tests import get_fake_revision
@@ -119,6 +126,40 @@ def test_set_resource_architectures_output_json(emitter, updates, expected):
emitter.assert_json_output(expected)
+def test_publish_lib_error(monkeypatch, new_path: pathlib.Path) -> None:
+ mock_service_factory = mock.Mock(spec=CharmcraftServiceFactory)
+ mock_service_factory.project.name = "test-project"
+ lib_path = new_path / "lib/charms/test_project/v0/my_lib.py"
+ lib_path.parent.mkdir(parents=True)
+ lib_path.write_text("LIBAPI=0\nLIBID='blah'\nLIBPATCH=1")
+
+ mock_store = mock.Mock()
+ mock_store.return_value.get_libraries_tips.return_value = {
+ ("blah", 0): Library(
+ charm_name="test-project",
+ lib_id="blah",
+ lib_name="my_lib",
+ api=0,
+ patch=2,
+ content=None,
+ content_hash="",
+ ),
+ }
+ monkeypatch.setattr(store_commands, "Store", mock_store)
+
+ cmd = PublishLibCommand({"app": APP_METADATA, "services": mock_service_factory})
+
+ assert (
+ cmd.run(
+ argparse.Namespace(
+ library="charms.test-project.v0.my_lib",
+ format=False,
+ )
+ )
+ == 1
+ )
+
+
@pytest.mark.parametrize(
("updates", "expected"),
[
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"ruff",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y python-apt-dev libapt-pkg-dev clang"
],
"python": "3.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | annotated-types==0.7.0
anyio==4.9.0
astroid==3.3.9
attrs==25.3.0
backports.tarfile==1.2.0
boolean.py==4.0
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
-e git+https://github.com/canonical/charmcraft.git@0977fbf449d46accb75838b11db8590eb6f2b8de#egg=charmcraft
charset-normalizer==3.4.1
coverage==7.8.0
craft-application==4.10.0
craft-archives==2.1.0
craft-cli==3.0.0
craft-grammar==2.0.3
craft-parts==2.7.0
craft-platforms==0.6.0
craft-providers==2.2.0
craft-store==3.2.1
cryptography==44.0.2
dill==0.3.9
distro==1.9.0
docker==7.1.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
freezegun==1.5.1
h11==0.14.0
httpcore==1.0.7
httplib2==0.22.0
httpx==0.28.1
humanize==4.12.2
hypothesis==6.130.6
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
keyring==25.6.0
launchpadlib==2.1.0
lazr.restfulclient==0.14.6
lazr.uri==1.0.7
license-expression==30.4.1
macaroonbakery==1.3.4
MarkupSafe==3.0.2
mccabe==0.7.0
more-itertools==10.6.0
oauthlib==3.2.2
overrides==7.7.0
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
protobuf==6.30.2
pycparser==2.22
pydantic==2.9.2
pydantic_core==2.23.4
pyfakefs==5.8.0
pygit2==1.14.1
pylint==3.3.6
pymacaroons==0.13.0
PyNaCl==1.5.0
pyparsing==3.2.3
pyRFC3339==1.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-check==2.5.2
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-subprocess==1.5.3
python-dateutil==2.9.0.post0
python-debian==0.1.49
pytz==2025.2
pyxdg==0.28
PyYAML==6.0.2
referencing==0.36.2
requests==2.32.3
requests-toolbelt==1.0.0
requests-unixsocket2==0.4.2
responses==0.25.7
rpds-py==0.24.0
ruff==0.11.2
SecretStorage==3.3.3
six==1.17.0
snap-helpers==0.4.2
sniffio==1.3.1
sortedcontainers==2.4.0
tabulate==0.9.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tomlkit==0.13.2
typing_extensions==4.13.0
urllib3==2.3.0
wadllib==2.0.0
zipp==3.21.0
| name: charmcraft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py310h06a4308_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- pytest=8.3.4=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py310h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- annotated-types==0.7.0
- anyio==4.9.0
- astroid==3.3.9
- attrs==25.3.0
- backports-tarfile==1.2.0
- boolean-py==4.0
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charmcraft==3.2.2.post122+g0977fbf4
- charset-normalizer==3.4.1
- coverage==7.8.0
- craft-application==4.10.0
- craft-archives==2.1.0
- craft-cli==3.0.0
- craft-grammar==2.0.3
- craft-parts==2.7.0
- craft-platforms==0.6.0
- craft-providers==2.2.0
- craft-store==3.2.1
- cryptography==44.0.2
- dill==0.3.9
- distro==1.9.0
- docker==7.1.0
- freezegun==1.5.1
- h11==0.14.0
- httpcore==1.0.7
- httplib2==0.22.0
- httpx==0.28.1
- humanize==4.12.2
- hypothesis==6.130.6
- idna==3.10
- importlib-metadata==8.6.1
- isort==6.0.1
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- keyring==25.6.0
- launchpadlib==2.1.0
- lazr-restfulclient==0.14.6
- lazr-uri==1.0.7
- license-expression==30.4.1
- macaroonbakery==1.3.4
- markupsafe==3.0.2
- mccabe==0.7.0
- more-itertools==10.6.0
- oauthlib==3.2.2
- overrides==7.7.0
- platformdirs==4.3.7
- protobuf==6.30.2
- pycparser==2.22
- pydantic==2.9.2
- pydantic-core==2.23.4
- pyfakefs==5.8.0
- pygit2==1.14.1
- pylint==3.3.6
- pymacaroons==0.13.0
- pynacl==1.5.0
- pyparsing==3.2.3
- pyrfc3339==1.1
- pytest-check==2.5.2
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-subprocess==1.5.3
- python-dateutil==2.9.0.post0
- python-debian==0.1.49
- pytz==2025.2
- pyxdg==0.28
- pyyaml==6.0.2
- referencing==0.36.2
- requests==2.32.3
- requests-toolbelt==1.0.0
- requests-unixsocket2==0.4.2
- responses==0.25.7
- rpds-py==0.24.0
- ruff==0.11.2
- secretstorage==3.3.3
- six==1.17.0
- snap-helpers==0.4.2
- sniffio==1.3.1
- sortedcontainers==2.4.0
- tabulate==0.9.0
- tomlkit==0.13.2
- typing-extensions==4.13.0
- urllib3==2.3.0
- wadllib==2.0.0
- zipp==3.21.0
prefix: /opt/conda/envs/charmcraft
| [
"tests/unit/commands/test_store.py::test_publish_lib_error"
] | [] | [
"tests/unit/commands/test_store.py::test_login_basic_no_export",
"tests/unit/commands/test_store.py::test_login_export[None-None-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-None-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-None-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-None-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-None-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-None-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-None-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-None-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission1-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[None-permission2-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-None-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-None-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-None-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-None-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-None-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-None-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-None-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-None-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission1-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[0-permission2-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-None-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission1-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-None-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-None-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-None-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-None-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-channel1-None-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-channel1-None-charm1]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-channel1-bundle1-None]",
"tests/unit/commands/test_store.py::test_login_export[36893488147419103232-permission2-channel1-bundle1-charm1]",
"tests/unit/commands/test_store.py::test_set_resource_architectures_output_json[updates0-expected0]",
"tests/unit/commands/test_store.py::test_set_resource_architectures_output_json[updates1-expected1]",
"tests/unit/commands/test_store.py::test_set_resource_architectures_output_table[updates0-No",
"tests/unit/commands/test_store.py::test_set_resource_architectures_output_table[updates1-",
"tests/unit/commands/test_store.py::test_fetch_libs_no_charm_libs",
"tests/unit/commands/test_store.py::test_fetch_libs_missing_from_store[libs0-Could",
"tests/unit/commands/test_store.py::test_fetch_libs_missing_from_store[libs1-Could",
"tests/unit/commands/test_store.py::test_fetch_libs_no_content[libs0-store_libs0-dl_lib0-Store",
"tests/unit/commands/test_store.py::test_fetch_libs_success[libs0-store_libs0-dl_lib0-Store",
"tests/unit/commands/test_store.py::test_register_bundle_warning",
"tests/unit/commands/test_store.py::test_register_bundle_error"
] | [] | Apache License 2.0 | 20,504 | 290 | [
"charmcraft/application/commands/store.py"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.