| Column | Type | Range / values |
|---|---|---|
| instance_id | string | lengths 12 to 57 |
| base_commit | string | lengths 40 to 40 |
| created_at | string (date) | 2015-01-06 14:05:07 to 2025-04-29 17:56:51 |
| environment_setup_commit | string | lengths 40 to 40 |
| hints_text | string | lengths 0 to 158k |
| patch | string | lengths 261 to 20.8k |
| problem_statement | string | lengths 11 to 52.5k |
| repo | string | lengths 7 to 53 |
| test_patch | string | lengths 280 to 206k |
| meta | dict | |
| version | string | 463 distinct values |
| install_config | dict | |
| requirements | string (nullable) | lengths 93 to 34k |
| environment | string (nullable) | lengths 772 to 20k |
| FAIL_TO_PASS | sequence | lengths 1 to 856 |
| FAIL_TO_FAIL | sequence | lengths 0 to 536 |
| PASS_TO_PASS | sequence | lengths 0 to 7.87k |
| PASS_TO_FAIL | sequence | lengths 0 to 92 |
| license_name | string | 35 distinct values |
| __index_level_0__ | int64 | 11 to 21.4k |
| num_tokens_patch | int64 | 103 to 4.99k |
| before_filepaths | sequence | lengths 0 to 14 |
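For readers who want to inspect records like the ones previewed below, here is a minimal sketch using the Hugging Face `datasets` library; the dataset identifier `org/dataset-name` is a placeholder, not the real repository id:

```python
from datasets import load_dataset

# Placeholder id -- substitute the actual dataset repository.
ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["instance_id"], row["repo"], row["version"])
print(row["problem_statement"][:200])   # issue text driving the patch
print(row["FAIL_TO_PASS"])              # tests the gold patch must make pass
```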
sdispater__tomlkit-168 | 7b450661e02d161cbf9a3bec3b3955cbcb64efef | 2022-01-27 11:08:07 | 7b450661e02d161cbf9a3bec3b3955cbcb64efef | diff --git a/tomlkit/api.py b/tomlkit/api.py
index 9a58be1..c4cc07f 100644
--- a/tomlkit/api.py
+++ b/tomlkit/api.py
@@ -8,6 +8,7 @@ from typing import Union
from ._utils import parse_rfc3339
from .container import Container
+from .exceptions import UnexpectedCharError
from .items import AoT
from .items import Array
from .items import Bool
@@ -232,7 +233,11 @@ def value(raw: str) -> _Item:
>>> value("[1, 2, 3]")
[1, 2, 3]
"""
- return Parser(raw)._parse_value()
+ parser = Parser(raw)
+ v = parser._parse_value()
+ if not parser.end():
+ raise parser.parse_error(UnexpectedCharError, char=parser._current)
+ return v
def key_value(src: str) -> Tuple[Key, _Item]:
diff --git a/tomlkit/parser.py b/tomlkit/parser.py
index e3d0487..e1f64df 100644
--- a/tomlkit/parser.py
+++ b/tomlkit/parser.py
@@ -123,11 +123,11 @@ class Parser:
"""
self._src.mark()
- def parse_error(self, exception=ParseError, *args):
+ def parse_error(self, exception=ParseError, *args, **kwargs):
"""
Creates a generic "parse error" at the current position.
"""
- return self._src.parse_error(exception, *args)
+ return self._src.parse_error(exception, *args, **kwargs)
def parse(self) -> TOMLDocument:
body = TOMLDocument(True)
@@ -1082,7 +1082,7 @@ class Parser:
with self._state(restore=True):
buf = ""
for _ in range(n):
- if self._current not in " \t\n\r#,]}":
+ if self._current not in " \t\n\r#,]}" + self._src.EOF:
buf += self._current
self.inc()
continue
diff --git a/tomlkit/source.py b/tomlkit/source.py
index 06882c2..6f82d94 100644
--- a/tomlkit/source.py
+++ b/tomlkit/source.py
@@ -147,7 +147,7 @@ class Source(str):
# failed to consume minimum number of characters
if min > 0:
- self.parse_error(UnexpectedCharError, self.current)
+ raise self.parse_error(UnexpectedCharError, self.current)
def end(self) -> bool:
"""
@@ -162,14 +162,17 @@ class Source(str):
self._marker = self._idx
def parse_error(
- self, exception: Type[ParseError] = ParseError, *args: Any
+ self,
+ exception: Type[ParseError] = ParseError,
+ *args: Any,
+ **kwargs: Any,
) -> ParseError:
"""
Creates a generic "parse error" at the current position.
"""
line, col = self._to_linecol()
- return exception(line, col, *args)
+ return exception(line, col, *args, **kwargs)
def _to_linecol(self) -> Tuple[int, int]:
cur = 0
| Bug: tomlkit.value("test") returns True
Since tomlkit `0.8.0` all strings starting with lowercase letter `t` are converted to `True` and all strings starting with `f` are converted to `False` if using the `tomlkit.value(...)` call.
E.g.: Install tomlkit `0.8.0` into a fresh venv and using `python3`:
```sh
❯ python3
Python 3.10.0 (default, Oct 22 2021, 13:24:07) [Clang 13.0.0 (clang-1300.0.29.3)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import tomlkit
>>> tomlkit.value("test")
True
>>> tomlkit.value("fest")
False
>>> tomlkit.value("NoTest")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/robot/test/test123/lib/python3.10/site-packages/tomlkit/api.py", line 149, in value
return Parser(raw)._parse_value()
File "/Users/robot/test/test123/lib/python3.10/site-packages/tomlkit/parser.py", line 598, in _parse_value
raise self.parse_error(UnexpectedCharError, c)
tomlkit.exceptions.UnexpectedCharError: Unexpected character: 'N' at line 1 col 0
```
**I would expect that all these strings throw an error like it is done in `0.7.2`.** | sdispater/tomlkit | diff --git a/tests/test_api.py b/tests/test_api.py
index 4d24844..2cd360f 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -301,3 +301,76 @@ def test_add_dotted_key():
table = tomlkit.table()
table.add(tomlkit.key(["foo", "bar"]), 1)
assert table.as_string() == "foo.bar = 1\n"
+
+
[email protected](
+ ("raw", "expected"),
+ [
+ ("true", True),
+ ("false", False),
+ ],
+)
+def test_value_parses_boolean(raw, expected):
+ parsed = tomlkit.value(raw)
+ assert parsed == expected
+
+
[email protected](
+ "raw", ["t", "f", "tru", "fals", "test", "friend", "truthy", "falsify"]
+)
+def test_value_rejects_values_looking_like_bool_at_start(raw):
+ """Reproduces https://github.com/sdispater/tomlkit/issues/165"""
+ with pytest.raises(tomlkit.exceptions.ParseError):
+ tomlkit.value(raw)
+
+
[email protected](
+ "raw",
+ [
+ "truee",
+ "truely",
+ "true-thoughts",
+ "true_hip_hop",
+ ],
+)
+def test_value_rejects_values_having_true_prefix(raw):
+ """Values that have ``true`` or ``false`` as prefix but then have additional chars are rejected."""
+ with pytest.raises(tomlkit.exceptions.ParseError):
+ tomlkit.value(raw)
+
+
[email protected](
+ "raw",
+ [
+ "falsee",
+ "falsely",
+ "false-ideas",
+ "false_prophet",
+ ],
+)
+def test_value_rejects_values_having_false_prefix(raw):
+ """Values that have ``true`` or ``false`` as prefix but then have additional chars are rejected."""
+ with pytest.raises(tomlkit.exceptions.ParseError):
+ tomlkit.value(raw)
+
+
[email protected](
+ "raw",
+ [
+ '"foo"1.2',
+ "truefalse",
+ "1.0false",
+ "100true",
+ "truetrue",
+ "falsefalse",
+ "1.2.3.4",
+ "[][]",
+ "{a=[][]}[]",
+ "true[]",
+ "false{a=1}",
+ ],
+)
+def test_value_rejects_values_with_appendage(raw):
+ """Values that appear valid at the beginning but leave chars unparsed are rejected."""
+ with pytest.raises(tomlkit.exceptions.ParseError):
+ tomlkit.value(raw)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 3
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": [
"git submodule update --init --recursive"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/sdispater/tomlkit.git@7b450661e02d161cbf9a3bec3b3955cbcb64efef#egg=tomlkit
| name: tomlkit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- tomlkit==0.8.0
prefix: /opt/conda/envs/tomlkit
| [
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[t]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[f]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[tru]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[fals]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[test]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[friend]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[truthy]",
"tests/test_api.py::test_value_rejects_values_looking_like_bool_at_start[falsify]",
"tests/test_api.py::test_value_rejects_values_having_true_prefix[truee]",
"tests/test_api.py::test_value_rejects_values_having_true_prefix[truely]",
"tests/test_api.py::test_value_rejects_values_having_true_prefix[true-thoughts]",
"tests/test_api.py::test_value_rejects_values_having_true_prefix[true_hip_hop]",
"tests/test_api.py::test_value_rejects_values_having_false_prefix[falsee]",
"tests/test_api.py::test_value_rejects_values_having_false_prefix[falsely]",
"tests/test_api.py::test_value_rejects_values_having_false_prefix[false-ideas]",
"tests/test_api.py::test_value_rejects_values_having_false_prefix[false_prophet]",
"tests/test_api.py::test_value_rejects_values_with_appendage[\"foo\"1.2]",
"tests/test_api.py::test_value_rejects_values_with_appendage[truefalse]",
"tests/test_api.py::test_value_rejects_values_with_appendage[truetrue]",
"tests/test_api.py::test_value_rejects_values_with_appendage[falsefalse]",
"tests/test_api.py::test_value_rejects_values_with_appendage[[][]]",
"tests/test_api.py::test_value_rejects_values_with_appendage[true[]]",
"tests/test_api.py::test_value_rejects_values_with_appendage[false{a=1}]"
] | [] | [
"tests/test_api.py::test_parse_can_parse_valid_toml_files[example]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[fruit]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[hard]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[sections_with_same_start]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[pyproject]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[0.5.0]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[test]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[newline_in_strings]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[preserve_quotes_in_string]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[string_slash_whitespace_newline]",
"tests/test_api.py::test_parse_can_parse_valid_toml_files[table_names]",
"tests/test_api.py::test_load_from_file_object[example]",
"tests/test_api.py::test_load_from_file_object[fruit]",
"tests/test_api.py::test_load_from_file_object[hard]",
"tests/test_api.py::test_load_from_file_object[sections_with_same_start]",
"tests/test_api.py::test_load_from_file_object[pyproject]",
"tests/test_api.py::test_load_from_file_object[0.5.0]",
"tests/test_api.py::test_load_from_file_object[test]",
"tests/test_api.py::test_load_from_file_object[newline_in_strings]",
"tests/test_api.py::test_load_from_file_object[preserve_quotes_in_string]",
"tests/test_api.py::test_load_from_file_object[string_slash_whitespace_newline]",
"tests/test_api.py::test_load_from_file_object[table_names]",
"tests/test_api.py::test_parsed_document_are_properly_json_representable[0.5.0]",
"tests/test_api.py::test_parsed_document_are_properly_json_representable[pyproject]",
"tests/test_api.py::test_parsed_document_are_properly_json_representable[table_names]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[section_with_trailing_characters-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[key_value_with_trailing_chars-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[array_with_invalid_chars-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[invalid_number-InvalidNumberError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[invalid_date-InvalidDateError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[invalid_time-InvalidTimeError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[invalid_datetime-InvalidDateTimeError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[trailing_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[newline_in_singleline_string-InvalidControlChar]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[string_slash_whitespace_char-InvalidCharInStringError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[array_no_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[array_duplicate_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[array_leading_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[inline_table_no_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[inline_table_duplicate_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[inline_table_leading_comma-UnexpectedCharError]",
"tests/test_api.py::test_parse_raises_errors_for_invalid_toml_files[inline_table_trailing_comma-UnexpectedCharError]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[example]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[fruit]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[hard]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[sections_with_same_start]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[pyproject]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[0.5.0]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[test]",
"tests/test_api.py::test_original_string_and_dumped_string_are_equal[table_names]",
"tests/test_api.py::test_a_raw_dict_can_be_dumped",
"tests/test_api.py::test_mapping_types_can_be_dumped",
"tests/test_api.py::test_dumps_weird_object",
"tests/test_api.py::test_dump_to_file_object",
"tests/test_api.py::test_integer",
"tests/test_api.py::test_float",
"tests/test_api.py::test_boolean",
"tests/test_api.py::test_date",
"tests/test_api.py::test_time",
"tests/test_api.py::test_datetime",
"tests/test_api.py::test_array",
"tests/test_api.py::test_table",
"tests/test_api.py::test_inline_table",
"tests/test_api.py::test_aot",
"tests/test_api.py::test_key",
"tests/test_api.py::test_key_value",
"tests/test_api.py::test_string",
"tests/test_api.py::test_item_dict_to_table",
"tests/test_api.py::test_item_mixed_aray",
"tests/test_api.py::test_build_super_table",
"tests/test_api.py::test_add_dotted_key",
"tests/test_api.py::test_value_parses_boolean[true-True]",
"tests/test_api.py::test_value_parses_boolean[false-False]",
"tests/test_api.py::test_value_rejects_values_with_appendage[1.0false]",
"tests/test_api.py::test_value_rejects_values_with_appendage[100true]",
"tests/test_api.py::test_value_rejects_values_with_appendage[1.2.3.4]",
"tests/test_api.py::test_value_rejects_values_with_appendage[{a=[][]}[]]"
] | [] | MIT License | 12,085 | 815 | [
"tomlkit/api.py",
"tomlkit/parser.py",
"tomlkit/source.py"
] |
|
beartype__beartype-88 | 4190bc3112d0f56d81af586a5f01a10c3e19feae | 2022-01-27 14:32:29 | 4190bc3112d0f56d81af586a5f01a10c3e19feae | codecov[bot]: # [Codecov](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype) Report
> Merging [#88](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype) (6b44680) into [main](https://codecov.io/gh/beartype/beartype/commit/7149ca7bacc44a9aca86a4f9212dff48ebc57ca4?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype) (7149ca7) will **decrease** coverage by `0.06%`.
> The diff coverage is `100.00%`.
[](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype)
```diff
@@ Coverage Diff @@
## main #88 +/- ##
==========================================
- Coverage 96.68% 96.62% -0.07%
==========================================
Files 118 118
Lines 4379 4388 +9
==========================================
+ Hits 4234 4240 +6
- Misses 145 148 +3
```
| [Impacted Files](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype) | Coverage Δ | |
|---|---|---|
| [beartype/\_util/func/utilfuncmake.py](https://codecov.io/gh/beartype/beartype/pull/88/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype#diff-YmVhcnR5cGUvX3V0aWwvZnVuYy91dGlsZnVuY21ha2UucHk=) | `98.18% <100.00%> (+0.35%)` | :arrow_up: |
| [beartype/cave/\_cavelib.py](https://codecov.io/gh/beartype/beartype/pull/88/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype#diff-YmVhcnR5cGUvY2F2ZS9fY2F2ZWxpYi5weQ==) | `90.24% <0.00%> (-4.88%)` | :arrow_down: |
| [beartype/\_util/mod/utilmodtest.py](https://codecov.io/gh/beartype/beartype/pull/88/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype#diff-YmVhcnR5cGUvX3V0aWwvbW9kL3V0aWxtb2R0ZXN0LnB5) | `89.58% <0.00%> (-2.09%)` | :arrow_down: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype). Last update [7149ca7...6b44680](https://codecov.io/gh/beartype/beartype/pull/88?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=beartype).
posita: @TeamSpen210 did the heavy lifting here (see #84). I just typed it in.
So this is neat:
``` python
# test.py
from beartype import beartype, BeartypeConf
@beartype(conf=BeartypeConf(is_print_wrapper_code=True))
def foo(i: int) -> str:
breakpoint()
return str(i)
print(foo(0))
```
``` sh
python test.py
(line 0001) def foo(
(line 0002) *args,
(line 0003) __beartype_func=__beartype_func, # is <function foo at 0x106c249d0>
…
(line 0041) )
(line 0042)
(line 0043) return __beartype_pith_0
> /…/test.py(7)foo()
-> return str(i)
(Pdb) bt
/…/test.py(9)<module>()
-> print(foo(0))
<@beartype(foo) at 0x106c249d0>(29)foo()
-> __beartype_pith_0 = __beartype_func(*args, **kwargs)
> /…/test.py(7)foo()
-> return str(i)
(Pdb) u
> <@beartype(foo) at 0x106c249d0>(29)foo()
-> __beartype_pith_0 = __beartype_func(*args, **kwargs)
(Pdb) l
24 pith_value=__beartype_pith_0,
25 )
26
27 # Call this function with all passed parameters and localize the value
28 # returned from this call.
29 -> __beartype_pith_0 = __beartype_func(*args, **kwargs)
30
31 # Noop required to artificially increase indentation level. Note that
32 # CPython implicitly optimizes this conditional away. Isn't that nice?
33 if True:
34 # Type-check this passed parameter or return value against this
(Pdb) c
0
```
posita: I have no idea what [that error](https://github.com/beartype/beartype/runs/4968911087?check_suite_focus=true) is about. 😕
leycec: **This so hot.** Plentiful bear hugs to the crack commando squad that made this happen. What cannot @posita and @TeamSpen210 do when their formidable powers combine? We are now finding out.
I'd better merge this ASAP before I accidentally break everything by drunkenly stumbling back into the `utilfuncmake` submodule on a frostbitten Saturday evening. Three questions for the restless crowd angrily thumping the armrests of their La-Z-Boys:
* The option name `is_print_wrapper_code`. It is verbose, overly specific, and I already grow weary of seeing it everywhere. But laziness prevails, because I else have yet to release `beartype 0.10.0` or publicly document anything. This PR generalizes that option to *also* cache that code for debuggability – which is great, because debuggability was the entire point of that option. Right? You are now wondering: "Where is the question we were promised." Okay, already! Down, angry rabble! Down! The question is:
* Should we just name this option `is_debug` instead? My initial reasoning for that name was that we'd have different kinds of debugging options – like, say, `is_log_wrapper_code` or `is_linecache_wrapper_code`. That now seems like gross overkill. If anyone furiously demands deeper debugging options, we can always define those later. So... `is_debug`? Thumbs up? Facepalm? Double facepalm, even? :grimacing:
* Kinda afraid to ask what's going on down below, but gonna ask anyway. What's going on down below? ↓
``` python
finalize(func, lambda: linecache.cache.pop(linecache_file_name, None))
```
* This seem kinda sus and I am squinting af:
``` python
linecache_file_name = f"<@beartype({func_name}) at {id(func_wrapped):#x}>"
...
# FIXME: Do we need a loop here to avoid name clashes (a la
# _make_method in
# <https://github.com/python-attrs/attrs/blob/main/src/attr/_make.py>)?
# Probobably unlikely, but there is this (from
# <https://docs.python.org/3/library/functions.html#id>): "Two objects
# with non-overlapping lifetimes may have the same id() value."
linecache.cache[linecache_file_name] = ( # type: ignore[assignment]
```
As you astutely note, object IDs are technically ambiguous across the lifecycle of a Python process. Since we can't disambiguate on that, let's disambiguate on the `func_name` instead! Specifically, let's conditionally expand the assignment of `linecache_file_name` to prefer a fully-qualified function name cobbled together from the passed wrapper function if passed: e.g.,
``` python
func_full_name = (
f'{func_wrapped.__module__}{func_wrapped.__name__}'
if func_wrapped else
func_name
)
linecache_file_name = f"<@beartype({func_full_name}) at {id(func_wrapped):#x}>"
```
Since `@beartype` always passes the currently decorated callable as the `func_wrapped` parameter to `make_func`, this *should* have the intended effect of disambiguating cache entries on fully-qualified callable names – which *should* be fully disambiguous across the lifecycle of a Python process (assuming no shenanigans, because let's not even go there, because my forehead capillary is already bursting out).
posita: ## On `is_debug`
> * Should we just name this option `is_debug` instead? My initial reasoning for that name was that we'd have different kinds of debugging options – like, say, `is_log_wrapper_code` or `is_linecache_wrapper_code`. That now seems like gross overkill. If anyone furiously demands deeper debugging options, we can always define those later. So... `is_debug`? Thumbs up? Facepalm? Double facepalm, even? 😬
My vote is double secret probation triple-lindy thumbs up. #90 is up to see what that looks like. If that works for you, I can rebase this PR onto that one.
## On `finalize`
> * Kinda afraid to ask what's going on down below, but gonna ask anyway. What's going on down below? ↓
>
> ```python
> finalize(func, lambda: linecache.cache.pop(linecache_file_name, None))
> ```
You are right to inquire. Your allusion to La-Z-Boys could describe me in ~writing that~ _obfuscating what was going on there_. I have attempted to make it a bit more accessible. I'll reproduce that attempt here:
``` python
# Define and register a cleanup callback for removing func's linecache
# entry if func is ever garbage collected.
def remove_linecache_entry_for_func():
linecache.cache.pop(linecache_file_name, None)
finalize(func, remove_linecache_entry_for_func)
```
## Regarding uniqueness of file names
I like your proposal and adopted it with vigor (and added a comment).
## Side trip: Should "exec" be "single" instead? Does it matter?
Regarding this…
``` python
#FIXME: Should "exec" be "single" instead? Does it matter? Is there any
#performance gap between the two?
```
While I was messing with all of this, I figured I'd try to answer that because I was curious myself. In my superficial testing, `"exec"` is at worst about 1 µs slower than `"single"`, so not really a performance issue.
Here's my IPython test script:
``` python
# foo.ipy
code = """
class Foo:
def __init__(self):
self._foo = foo(1)
@property
def foo(self) -> int:
return int(self._foo)
"""
%timeit compile(code, "foo.py", "exec")
%timeit compile(code, "foo.py", "single")
```
These are typical for my laptop:
``` sh
% ipython foo.ipy
38.9 µs ± 307 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
37.4 µs ± 444 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
% ipython foo.ipy
38.5 µs ± 454 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
39 µs ± 1.32 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
% ipython foo.ipy
40.6 µs ± 1.39 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
39.8 µs ± 1.13 µs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)
```
The only other reason I could think of to use `"single"` is that it acts as a "safeguard" of sorts by limiting the generated code to defining a single thing. For example, `"single"` will fail on the following, where `"exec"` won't:
``` python
class Foo:
def __init__(self):
self._foo = foo(1)
@property
def foo(self) -> int:
return int(self._foo)
def foo(a: int) -> str: # <-- "single" will see this second thing as a syntax error
return str(a)
```
I removed your `FIXME` and kept the `"exec"`, but let me know if I should change it to `"single"` and/or add a comment explaining any reasoning.
TeamSpen210: `weakref.finalize()` is a tool which uses the weakref system to effectively add on a `__del__()` method to an object - it stores a callable, when the object dies it gets called. So this ensures if the original function is deleted (say you're beartyping a closure, perhaps?) we go and try to clear the linecache entry. `pop`'s desirable to ensure if someone else cleared the cache or something it doesn't error.
Ah, ninjaed.
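For readers unfamiliar with the pattern, a tiny standalone sketch of the `weakref.finalize` behavior described above (illustrative only, not code from this PR):

```python
import weakref

class Wrapper:
    """Stand-in for a @beartype wrapper function object."""

def cleanup():
    print("wrapper collected -> drop its linecache entry")

w = Wrapper()
weakref.finalize(w, cleanup)  # register a callback tied to w's lifetime

del w  # on CPython the refcount hits zero here and cleanup() runs
```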
posita: Rebased in light of #90 landing.
leycec: 
When a brilliance is so bright that it caustically sears itself into the mucosal lining of the back of your retinas, it's a safe bet @posita and @TeamSpen210 teamed up yet again to radically transform our humble backwater world into the utopian Niven ring of tomorrow, today.
Thanks *sooo* much for the tremendous contribution. Hot off the presses:
> **Crack :bear: Squad Crushes Another Tough Nut.** | diff --git a/beartype/_util/func/utilfuncmake.py b/beartype/_util/func/utilfuncmake.py
index 24cc669c..2bf3b6de 100644
--- a/beartype/_util/func/utilfuncmake.py
+++ b/beartype/_util/func/utilfuncmake.py
@@ -20,6 +20,8 @@ from beartype._util.text.utiltextmunge import number_lines
from collections.abc import Callable
from functools import update_wrapper
from typing import Optional, Type
+from weakref import finalize
+import linecache
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
@@ -152,33 +154,29 @@ def make_func(
# Python fails to capture that function (i.e., expose that function to
# this function) when the locals() dictionary is passed; instead, a
# unique local dictionary *MUST* be passed.
- #
- # Note that the same result may also be achieved via the compile()
- # builtin and "types.FunctionType" class: e.g.,
- # func_code_compiled = compile(
- # func_code, "<string>", "exec").co_consts[0]
- # return types.FunctionType(
- # code=func_code_compiled,
- # globals=_GLOBAL_ATTRS,
- # argdefs=('__beartype_func', func)
- # )
- #
- # Since doing so is both more verbose and obfuscatory for no tangible
- # gain, the current circumspect approach is preferred.
- exec(func_code, func_globals, func_locals)
-
- #FIXME: See above.
- #FIXME: Should "exec" be "single" instead? Does it matter? Is there any
- #performance gap between the two?
- # func_code_compiled = compile(
- # func_code, func_wrapper_filename, "exec").co_consts[0]
- # return FunctionType(
- # code=func_code_compiled,
- # globals=_GLOBAL_ATTRS,
- #
- # #FIXME: This really doesn't seem right, but... *shrug*
- # argdefs=tuple(local_attrs.values()),
- # )
+
+ # Make up a filename for compilation and possibly the linecache entry
+ # (if we make one). A fully-qualified name and ID *should* be unique
+ # for the life of the process.
+ func_full_name = (
+ f"{func_wrapped.__module__}{func_wrapped.__name__}"
+ if func_wrapped else
+ func_name
+ )
+ linecache_file_name = f"<@beartype({func_full_name}) at {id(func_wrapped):#x}>"
+
+ # We use the more verbose and obfuscatory compile() builtin instead of
+ # simply calling exec(func_code, func_globals, func_locals) because
+ # exec does not provide a way to set the resulting function object's
+ # .__code__.co_filename read-only attribute. We can use "single"
+ # instead of "exec" if we are willing to accept that func_code is
+ # constrained to a single statement. In casual testing, there is very
+ # little performance difference (with an imperceptibly slight edge
+ # going to "single").
+ func_code_compiled = compile(
+ func_code, linecache_file_name, "exec")
+ assert func_name not in func_locals
+ exec(func_code_compiled, func_globals, func_locals)
# If doing so fails for any reason...
except Exception as exception:
# Raise an exception suffixed by that function's declaration such that
@@ -235,6 +233,23 @@ def make_func(
func.__doc__ = func_doc
# Else, that function is undocumented.
+ # Since we went through the trouble of printing its definition, we might
+ # as well make its compiled version debuggable, too.
+ if is_debug:
+ linecache.cache[linecache_file_name] = ( # type: ignore[assignment]
+ len(func_code), # type: ignore[assignment] # Y u gotta b diff'rnt Python 3.7? WHY?!
+ None, # mtime, but should be None to avoid being discarded
+ func_code.splitlines(keepends=True),
+ linecache_file_name,
+ )
+
+ # Define and register a cleanup callback for removing func's linecache
+ # entry if func is ever garbage collected.
+ def remove_linecache_entry_for_func():
+ linecache.cache.pop(linecache_file_name, None)
+
+ finalize(func, remove_linecache_entry_for_func)
+
# Return that function.
return func
| [Feature Request] Debuggable wrapper functions
`@beartype` dynamically wraps each decorated callable with a unique wrapper function efficiently type-checking that callable. That's great, because efficient. But that's also currently invisible to debuggers, because `@beartype` fails to register the Python code underlying these wrapper functions with [the little-known standard `linecache` module](https://docs.python.org/3/library/linecache.html).
This feature request tracks work towards rendering `@beartype` compatible with debuggers. For now, please refer to [this superb writeup by witty QA strongman @posita for a detailed discussion of the exact issue under discussion here – complete with the inevitable working solution infused with wisdom by cautionary AI futurologist @TeamSpen210](https://github.com/beartype/beartype/discussions/84).
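For context, a minimal sketch of the kind of `linecache` registration being requested here; the `source` and `filename` values are illustrative, not beartype's actual implementation:

```python
import linecache

source = "def is_positive(x):\n    return x > 0\n"
filename = "<generated: is_positive>"

# Compile under a synthetic filename so frames and tracebacks reference it...
namespace = {}
exec(compile(source, filename, "exec"), namespace)

# ...then register the source text under that filename. The cache entry format
# is (size, mtime, lines, fullname); mtime=None keeps linecache.checkcache()
# from evicting an entry that has no real file behind it.
linecache.cache[filename] = (
    len(source), None, source.splitlines(keepends=True), filename
)

print(linecache.getline(filename, 2))  # debuggers can now show this line
```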
Humble gratitude to both @posita and @TeamSpen210 for finally shaming me into doing this thing that desperately needs doing. | beartype/beartype | diff --git a/beartype_test/a00_unit/a90_decor/test_decorconf.py b/beartype_test/a00_unit/a90_decor/test_decorconf.py
index 98735bd7..0a33a891 100644
--- a/beartype_test/a00_unit/a90_decor/test_decorconf.py
+++ b/beartype_test/a00_unit/a90_decor/test_decorconf.py
@@ -118,6 +118,52 @@ def test_decor_conf_is_debug(capsys) -> None:
assert '# is <function _earthquake ' in standard_captured.out
+def test_decor_conf_is_debug_updates_linecache(capsys) -> None:
+ '''
+ Test the :func:`beartype.beartype` decorator passed the optional ``conf``
+ parameter passed the optional ``is_debug`` parameter results
+ in an updated linecache.
+
+ Parameters
+ ----------
+ capsys
+ :mod:`pytest` fixture enabling standard output and error to be reliably
+ captured and tested against from within unit tests and fixtures.
+
+ Parameters
+ ----------
+ https://docs.pytest.org/en/latest/how-to/capture-stdout-stderr.html#accessing-captured-output-from-a-test-function
+ Official ``capsys`` reference documentation.
+ '''
+
+ # Defer heavyweight imports.
+ from beartype import BeartypeConf, beartype
+ import linecache
+
+ # @beartype subdecorator printing wrapper function definitions.
+ beartype_printing = beartype(conf=BeartypeConf(is_debug=True))
+
+ beartyped_earthquake = beartype_printing(_earthquake)
+
+ # Pytest object freezing the current state of standard output and error as
+ # uniquely written to by this unit test up to this statement.
+ standard_captured = capsys.readouterr()
+ standard_lines = standard_captured.out.splitlines(keepends=True)
+
+ # This is probably overkill, but check to see that we generated lines in
+ # our linecache that correspond to the ones we printed. This a fragile
+ # coupling, but we can relax this later to avoid making those line-by-line
+ # comparisons and just check for the decorated function's filename's
+ # presence in the cache.
+ assert beartyped_earthquake.__code__.co_filename in linecache.cache
+ code_len, mtime, code_lines, code_filename = linecache.cache[beartyped_earthquake.__code__.co_filename]
+ assert mtime is None
+ assert len(code_lines) == len(standard_lines)
+ for code_line, standard_line in zip(code_lines, standard_lines):
+ assert code_line in standard_line
+ assert code_filename == beartyped_earthquake.__code__.co_filename
+
+
def test_decor_conf_strategy() -> None:
'''
Test the :func:`beartype.beartype` decorator passed the optional ``conf``
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
-e git+https://github.com/beartype/beartype.git@4190bc3112d0f56d81af586a5f01a10c3e19feae#egg=beartype
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mypy==1.15.0
mypy-extensions==1.0.0
numpy==2.0.2
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
requests==2.32.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: beartype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mypy==1.15.0
- mypy-extensions==1.0.0
- numpy==2.0.2
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- requests==2.32.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/beartype
| [
"beartype_test/a00_unit/a90_decor/test_decorconf.py::test_decor_conf_is_debug_updates_linecache"
] | [] | [
"beartype_test/a00_unit/a90_decor/test_decorconf.py::test_decor_conf",
"beartype_test/a00_unit/a90_decor/test_decorconf.py::test_decor_conf_is_debug",
"beartype_test/a00_unit/a90_decor/test_decorconf.py::test_decor_conf_strategy"
] | [] | MIT License | 12,088 | 1,105 | [
"beartype/_util/func/utilfuncmake.py"
] |
microsoft__debugpy-829 | 0105bd92ce003457fb556d734fce94c79356955f | 2022-01-27 17:51:36 | 356f665325a645a628669acd82d2a33323a26e5b | sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! 
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY)
[](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=coverage&view=list) No Coverage information
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=duplicated_lines_density&view=list) No Duplication information
sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! 
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY)
[](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=coverage&view=list) No Coverage information
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=duplicated_lines_density&view=list) No Duplication information
sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! 
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=BUG)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=VULNERABILITY)
[](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=829&resolved=false&types=SECURITY_HOTSPOT)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=829&resolved=false&types=CODE_SMELL)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=coverage&view=list) No Coverage information
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=829&metric=duplicated_lines_density&view=list) No Duplication information
| diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
index 1af1a778..a50f302e 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
@@ -3,7 +3,7 @@
"""
import pickle
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, xrange, IS_PY2, \
- iter_chars, silence_warnings_decorator
+ iter_chars, silence_warnings_decorator, dict_iter_items
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
from _pydev_bundle import pydev_log
@@ -254,10 +254,13 @@ def _expression_to_evaluate(expression):
return expression
-def eval_in_context(expression, globals, locals):
+def eval_in_context(expression, globals, locals=None):
result = None
try:
- result = eval(_expression_to_evaluate(expression), globals, locals)
+ if locals is None:
+ result = eval(_expression_to_evaluate(expression), globals)
+ else:
+ result = eval(_expression_to_evaluate(expression), globals, locals)
except (Exception, KeyboardInterrupt):
etype, result, tb = sys.exc_info()
result = ExceptionOnEvaluate(result, etype, tb)
@@ -272,6 +275,8 @@ def eval_in_context(expression, globals, locals):
split = expression.split('.')
entry = split[0]
+ if locals is None:
+ locals = globals
curr = locals[entry] # Note: we want the KeyError if it's not there.
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
@@ -347,6 +352,10 @@ def _evaluate_with_timeouts(original_func):
@functools.wraps(original_func)
def new_func(py_db, frame, expression, is_exec):
+ if py_db is None:
+ # Only for testing...
+ pydev_log.critical('_evaluate_with_timeouts called without py_db!')
+ return original_func(py_db, frame, expression, is_exec)
warn_evaluation_timeout = pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT
curr_thread = threading.current_thread()
@@ -374,6 +383,27 @@ def compile_as_eval(expression):
return compile(_expression_to_evaluate(expression), '<string>', 'eval')
+def _update_globals_and_locals(updated_globals, initial_globals, frame):
+ # We don't have the locals and passed all in globals, so, we have to
+ # manually choose how to update the variables.
+ #
+ # Note that the current implementation is a bit tricky: it does work in general
+ # but if we do something as 'some_var = 10' and 'some_var' is already defined to have
+ # the value '10' in the globals, we won't actually put that value in the locals
+ # (which means that the frame locals won't be updated).
+ # Still, the approach to have a single namespace was chosen because it was the only
+ # one that enabled creating and using variables during the same evaluation.
+ assert updated_globals is not None
+ changed = False
+ for key, val in dict_iter_items(updated_globals):
+ if initial_globals.get(key) is not val:
+ changed = True
+ frame.f_locals[key] = val
+
+ if changed:
+ pydevd_save_locals.save_locals(frame)
+
+
@_evaluate_with_timeouts
def evaluate_expression(py_db, frame, expression, is_exec):
'''
@@ -394,18 +424,46 @@ def evaluate_expression(py_db, frame, expression, is_exec):
if frame is None:
return
- # Note: not using frame.f_globals directly because we need variables to be mutated in that
- # context to support generator expressions (i.e.: the case below doesn't work unless
- # globals=locals) because a generator expression actually creates a new function context.
- # i.e.:
- # global_vars = {}
- # local_vars = {'ar':["foo", "bar"], 'y':"bar"}
- # print eval('all((x == y for x in ar))', global_vars, local_vars)
- # See: https://mail.python.org/pipermail/python-list/2009-January/522213.html
-
+ # This is very tricky. Some statements can change locals and use them in the same
+ # call (see https://github.com/microsoft/debugpy/issues/815), also, if locals and globals are
+ # passed separately, it's possible that one gets updated but apparently Python will still
+ # try to load from the other, so, what's done is that we merge all in a single dict and
+ # then go on and update the frame with the results afterwards.
+
+ # -- see tests in test_evaluate_expression.py
+
+ # This doesn't work because the variables aren't updated in the locals in case the
+ # evaluation tries to set a variable and use it in the same expression.
+ # updated_globals = frame.f_globals
+ # updated_locals = frame.f_locals
+
+ # This doesn't work because the variables aren't updated in the locals in case the
+ # evaluation tries to set a variable and use it in the same expression.
+ # updated_globals = {}
+ # updated_globals.update(frame.f_globals)
+ # updated_globals.update(frame.f_locals)
+ #
+ # updated_locals = frame.f_locals
+
+ # This doesn't work either in the case where the evaluation tries to set a variable and use
+ # it in the same expression (I really don't know why as it seems like this *should* work
+ # in theory but doesn't in practice).
+ # updated_globals = {}
+ # updated_globals.update(frame.f_globals)
+ #
+ # updated_locals = {}
+ # updated_globals.update(frame.f_locals)
+
+ # This is the only case that worked consistently to run the tests in test_evaluate_expression.py
+ # It's a bit unfortunate because although the exec works in this case, we have to manually
+ # put the updates in the frame locals afterwards.
updated_globals = {}
updated_globals.update(frame.f_globals)
- updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
+ updated_globals.update(frame.f_locals)
+
+ initial_globals = updated_globals.copy()
+
+ updated_locals = None
try:
if IS_PY2 and isinstance(expression, unicode):
@@ -419,10 +477,16 @@ def evaluate_expression(py_db, frame, expression, is_exec):
# it will have whatever the user actually did)
compiled = compile_as_eval(expression)
except Exception:
- Exec(_expression_to_evaluate(expression), updated_globals, frame.f_locals)
- pydevd_save_locals.save_locals(frame)
+ compiled = None
+
+ if compiled is None:
+ try:
+ Exec(_expression_to_evaluate(expression), updated_globals, updated_locals)
+ finally:
+ # Update the globals even if it errored as it may have partially worked.
+ _update_globals_and_locals(updated_globals, initial_globals, frame)
else:
- result = eval(compiled, updated_globals, frame.f_locals)
+ result = eval(compiled, updated_globals, updated_locals)
if result is not None: # Only print if it's not None (as python does)
if IS_PY2 and isinstance(result, unicode):
encoding = sys.stdout.encoding
@@ -433,7 +497,7 @@ def evaluate_expression(py_db, frame, expression, is_exec):
return
else:
- ret = eval_in_context(expression, updated_globals, frame.f_locals)
+ ret = eval_in_context(expression, updated_globals, updated_locals)
try:
is_exception_returned = ret.__class__ == ExceptionOnEvaluate
except:
@@ -442,11 +506,13 @@ def evaluate_expression(py_db, frame, expression, is_exec):
if not is_exception_returned:
# i.e.: by using a walrus assignment (:=), expressions can change the locals,
# so, make sure that we save the locals back to the frame.
- pydevd_save_locals.save_locals(frame)
+ _update_globals_and_locals(updated_globals, initial_globals, frame)
return ret
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
+ del updated_locals
+ del initial_globals
del frame
@@ -480,7 +546,7 @@ def change_attr_expression(frame, attr, expression, dbg, value=SENTINEL_VALUE):
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
- # default way (only works for changing it in the topmost frame)
+ # i.e.: case with '.' or save locals not available (just exec the assignment in the frame).
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
result = value
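To make the scoping problem discussed in the comments above concrete, here is a runnable version of the generator-expression example from the comment this patch removes (standalone sketch, not part of the patch itself):

```python
global_vars = {}
local_vars = {"ar": ["foo", "bar"], "y": "bar"}
expr = "all((x == y for x in ar))"

# The generator expression is compiled as its own nested scope, so 'y' is
# looked up in global_vars (where it was never stored) -> NameError.
try:
    print(eval(expr, global_vars, local_vars))
except NameError as exc:
    print("separate dicts:", exc)   # name 'y' is not defined

# Merging everything into one namespace, the approach the patch takes,
# makes the lookup succeed.
merged = {**global_vars, **local_vars}
print("single dict:", eval(expr, merged))   # False
```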
| Comprehension list in Debug Console doesn't get updated
### VS Code version
1.63.2
### Extension version
v2021.7.1060902895
### OS type
Linux
### OS version
20.04
### Python distribution
Anaconda
### Python version
3.8
### Language server
Pylance
### Expected behaviour
If I'm stoped at some point of the code and in the Debug console I run the following:
```[python]
lis = ["abc","efg"]
for l in lis:
print(l)
result = [l[i] for i in range(3)]
print(result)
```
I expect the console to print:
"abc"
['a'.'b','c']
"efg"
['e','f','g']
### Actual behaviour
The response is:
abc
**['e', 'f', 'g']**
efg
['e', 'f', 'g']
### Steps to reproduce
Stop at any point of your code with a breakpoint, open the debug console and insert the code provided above.

### Logs
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct | microsoft/debugpy | diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
new file mode 100644
index 00000000..881f55a5
--- /dev/null
+++ b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
@@ -0,0 +1,118 @@
+import sys
+from _pydevd_bundle.pydevd_constants import IS_PY38_OR_GREATER
+import pytest
+
+SOME_LST = ["foo", "bar"]
+BAR = "bar"
+FOO = "foo"
+global_frame = sys._getframe()
+
+
+def obtain_frame():
+ yield sys._getframe()
+
+
[email protected]
+def disable_critical_log():
+ # We want to hide the logging related to _evaluate_with_timeouts not receiving the py_db.
+ from _pydev_bundle.pydev_log import log_context
+ import io
+ stream = io.StringIO()
+ with log_context(0, stream):
+ yield
+
+
+def test_evaluate_expression_basic(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ evaluate_expression(None, frame, 'some_var = 1', is_exec=True)
+
+ assert frame.f_locals['some_var'] == 1
+
+ check(next(iter(obtain_frame())))
+ assert 'some_var' not in sys._getframe().f_globals
+
+ # as locals == globals, this will also change the current globals
+ check(global_frame)
+ assert 'some_var' in sys._getframe().f_globals
+ del sys._getframe().f_globals['some_var']
+ assert 'some_var' not in sys._getframe().f_globals
+
+
+def test_evaluate_expression_1(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = '''
+container = ["abc","efg"]
+results = []
+for s in container:
+ result = [s[i] for i in range(3)]
+ results.append(result)
+'''
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert frame.f_locals['results'] == [['a', 'b', 'c'], ['e', 'f', 'g']]
+ assert frame.f_locals['s'] == "efg"
+
+ check(next(iter(obtain_frame())))
+
+ for varname in ['container', 'results', 's']:
+ assert varname not in sys._getframe().f_globals
+
+ check(global_frame)
+ for varname in ['container', 'results', 's']:
+ assert varname in sys._getframe().f_globals
+
+ for varname in ['container', 'results', 's']:
+ del sys._getframe().f_globals[varname]
+
+
+def test_evaluate_expression_2(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = 'all((x in (BAR, FOO) for x in SOME_LST))'
+ assert evaluate_expression(None, frame, eval_txt, is_exec=False)
+
+ check(next(iter(obtain_frame())))
+ check(global_frame)
+
+
+def test_evaluate_expression_3(disable_critical_log):
+ if not IS_PY38_OR_GREATER:
+ return
+
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = '''11 if (some_var := 22) else 33'''
+ assert evaluate_expression(None, frame, eval_txt, is_exec=False) == 11
+
+ check(next(iter(obtain_frame())))
+ assert 'some_var' not in sys._getframe().f_globals
+
+ # as locals == globals, this will also change the current globals
+ check(global_frame)
+ assert 'some_var' in sys._getframe().f_globals
+ del sys._getframe().f_globals['some_var']
+ assert 'some_var' not in sys._getframe().f_globals
+
+
+def test_evaluate_expression_4(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = '''import email;email.foo_value'''
+ with pytest.raises(AttributeError):
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert 'email' in frame.f_locals
+
+ check(next(iter(obtain_frame())))
+ assert 'email' not in sys._getframe().f_globals
+
+ # as locals == globals, this will also change the current globals
+ check(global_frame)
+ assert 'email' in sys._getframe().f_globals
+ del sys._getframe().f_globals['email']
+ assert 'email' not in sys._getframe().f_globals
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"black",
"flake8",
"tox",
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | black==25.1.0
cachetools==5.5.2
chardet==5.2.0
click==8.1.8
colorama==0.4.6
coverage==7.8.0
-e git+https://github.com/microsoft/debugpy.git@0105bd92ce003457fb556d734fce94c79356955f#egg=debugpy
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
| name: debugpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- black==25.1.0
- cachetools==5.5.2
- chardet==5.2.0
- click==8.1.8
- colorama==0.4.6
- coverage==7.8.0
- debugpy==1.5.1+20.g0105bd92
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/debugpy
| [
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_basic",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_1",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_2",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_3",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_4"
] | [] | [] | [] | MIT License | 12,092 | 2,129 | [
"src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py"
] |
tefra__xsdata-650 | f4d73b124eeefec415ac55c4d4e7da84009ed9c6 | 2022-01-27 18:39:51 | 16b12b6135cdfdefbfcae1b8c76ba4602560117e | diff --git a/xsdata/codegen/handlers/attribute_substitution.py b/xsdata/codegen/handlers/attribute_substitution.py
index cbe3acce..c79eaf6b 100644
--- a/xsdata/codegen/handlers/attribute_substitution.py
+++ b/xsdata/codegen/handlers/attribute_substitution.py
@@ -9,6 +9,7 @@ from xsdata.codegen.models import Attr
from xsdata.codegen.models import AttrType
from xsdata.codegen.models import Class
from xsdata.codegen.utils import ClassUtils
+from xsdata.models.enums import Tag
from xsdata.utils import collections
@@ -82,6 +83,6 @@ class AttributeSubstitutionHandler(RelativeHandlerInterface):
return Attr(
name=source.name,
types=[AttrType(qname=source.qname)],
- tag=source.tag,
+ tag=Tag.ELEMENT,
namespace=source.namespace,
)
diff --git a/xsdata/codegen/validator.py b/xsdata/codegen/validator.py
index 0271842e..68da50e7 100644
--- a/xsdata/codegen/validator.py
+++ b/xsdata/codegen/validator.py
@@ -156,4 +156,5 @@ class ClassValidator:
ct.namespace = el.namespace or ct.namespace
ct.help = el.help or ct.help
+ ct.substitutions = el.substitutions
classes.remove(el)
| substitutionGroup not detected during model generation
I am trying to use XSDATA to parse Business Process Model and Notation (BPMN) 2.0.2 diagrams.
https://www.omg.org/spec/BPMN/2.0.2/
The XSD schema contains a _substitutionGroup_ element that is not detected during model generation. When parsing a valid BPMN XML file with the generated models, an "Unknown property" error occurs.
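(For illustration only; the class and import names below are assumptions, not taken from the linked repository. This is roughly where the failure surfaces when parsing with the generated bindings:)
```python
from xsdata.formats.dataclass.parsers import XmlParser

from generated.bpmn import Definitions  # hypothetical import of the generated BPMN root class

parser = XmlParser()
diagram = parser.parse("diagram.bpmn", Definitions)  # fails with the "Unknown property" error described above
```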
A working example with full error description and details (schema, generated model, parser code, name of substitutionGroup) can be found here:
https://github.com/THM-MA/xsdata-substGrp
I've already tried different structure styles (filenames, namespaces, clusters, etc.) to avoid namespace conflicts, without success.
The XSDATA documentation for 21.8 (2021-08-03) mentions _"Updated fields derived from xs:substitutionGroups to optional"_. Could this be the problem?
Any help is highly appreciated!
Thanks
Thomas
| tefra/xsdata | diff --git a/tests/codegen/handlers/test_attribute_substitution.py b/tests/codegen/handlers/test_attribute_substitution.py
index fd3672fb..9066be47 100644
--- a/tests/codegen/handlers/test_attribute_substitution.py
+++ b/tests/codegen/handlers/test_attribute_substitution.py
@@ -4,6 +4,7 @@ from xsdata.codegen.container import ClassContainer
from xsdata.codegen.handlers import AttributeSubstitutionHandler
from xsdata.codegen.models import AttrType
from xsdata.models.config import GeneratorConfig
+from xsdata.models.enums import Tag
from xsdata.utils.namespaces import build_qname
from xsdata.utils.testing import AttrFactory
from xsdata.utils.testing import AttrTypeFactory
@@ -116,7 +117,7 @@ class AttributeSubstitutionHandlerTests(FactoryTestCase):
name=item.name,
default=None,
types=[AttrType(qname=build_qname("foo", "bar"))],
- tag=item.tag,
+ tag=Tag.ELEMENT,
)
self.assertEqual(expected, actual)
diff --git a/tests/codegen/test_validator.py b/tests/codegen/test_validator.py
index fee88f85..9ac070bc 100644
--- a/tests/codegen/test_validator.py
+++ b/tests/codegen/test_validator.py
@@ -93,8 +93,14 @@ class ClassValidatorTests(FactoryTestCase):
)
def test_merge_global_types(self):
- one = ClassFactory.create(qname="foo", tag=Tag.ELEMENT, namespace="a", help="b")
- two = ClassFactory.create(qname="foo", tag=Tag.COMPLEX_TYPE)
+ one = ClassFactory.create(
+ qname="foo",
+ tag=Tag.ELEMENT,
+ namespace="a",
+ help="b",
+ substitutions=["a", "b"],
+ )
+ two = ClassFactory.create(qname="foo", tag=Tag.COMPLEX_TYPE, substitutions=[])
three = ClassFactory.create(qname="foo", tag=Tag.SIMPLE_TYPE)
classes = [one, two, three]
@@ -128,6 +134,8 @@ class ClassValidatorTests(FactoryTestCase):
self.assertIn(three, classes)
self.assertEqual(one.namespace, two.namespace)
self.assertEqual(one.help, two.help)
+ self.assertEqual(one.substitutions, two.substitutions)
+ self.assertEqual(2, len(one.substitutions))
@mock.patch.object(ClassUtils, "copy_extensions")
@mock.patch.object(ClassUtils, "copy_attributes")
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 22.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[cli,lxml,soap]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-benchmark",
"pytest-cov"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
click-default-group==1.2.4
coverage==7.8.0
docformatter==1.7.5
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
py-cpuinfo==9.0.0
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-cov==6.0.0
requests==2.32.3
tomli==2.2.1
toposort==1.10
untokenize==0.1.1
urllib3==2.3.0
-e git+https://github.com/tefra/xsdata.git@f4d73b124eeefec415ac55c4d4e7da84009ed9c6#egg=xsdata
| name: xsdata
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- click-default-group==1.2.4
- coverage==7.8.0
- docformatter==1.7.5
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- requests==2.32.3
- tomli==2.2.1
- toposort==1.10
- untokenize==0.1.1
- urllib3==2.3.0
prefix: /opt/conda/envs/xsdata
| [
"tests/codegen/handlers/test_attribute_substitution.py::AttributeSubstitutionHandlerTests::test_create_substitution",
"tests/codegen/test_validator.py::ClassValidatorTests::test_merge_global_types"
] | [] | [
"tests/codegen/handlers/test_attribute_substitution.py::AttributeSubstitutionHandlerTests::test_create_substitutions",
"tests/codegen/handlers/test_attribute_substitution.py::AttributeSubstitutionHandlerTests::test_process",
"tests/codegen/handlers/test_attribute_substitution.py::AttributeSubstitutionHandlerTests::test_process_attribute",
"tests/codegen/test_validator.py::ClassValidatorTests::test_handle_duplicate_types",
"tests/codegen/test_validator.py::ClassValidatorTests::test_handle_duplicate_types_with_redefined_type",
"tests/codegen/test_validator.py::ClassValidatorTests::test_merge_redefined_type_with_circular_extension",
"tests/codegen/test_validator.py::ClassValidatorTests::test_merge_redefined_type_with_circular_group",
"tests/codegen/test_validator.py::ClassValidatorTests::test_process",
"tests/codegen/test_validator.py::ClassValidatorTests::test_remove_invalid_classes",
"tests/codegen/test_validator.py::ClassValidatorTests::test_select_winner"
] | [] | MIT License | 12,093 | 343 | [
"xsdata/codegen/handlers/attribute_substitution.py",
"xsdata/codegen/validator.py"
] |
|
PyCQA__pyflakes-675 | dbb18432165bdc567e015511b3f3284f0f14cf05 | 2022-01-28 14:34:54 | dd446ed156837f50a06596ec79efc292e856954f | diff --git a/pyflakes/checker.py b/pyflakes/checker.py
index 45c7a4a..45f2a42 100644
--- a/pyflakes/checker.py
+++ b/pyflakes/checker.py
@@ -1291,7 +1291,14 @@ class Checker(object):
parent_stmt != node._pyflakes_parent and
not self.isLiteralTupleUnpacking(parent_stmt)):
binding = Binding(name, node)
- elif name == '__all__' and isinstance(self.scope, ModuleScope):
+ elif (
+ name == '__all__' and
+ isinstance(self.scope, ModuleScope) and
+ isinstance(
+ node._pyflakes_parent,
+ (ast.Assign, ast.AugAssign, ast.AnnAssign)
+ )
+ ):
binding = ExportBinding(name, node._pyflakes_parent, self.scope)
elif PY2 and isinstance(getattr(node, 'ctx', None), ast.Param):
binding = Argument(name, self.getScopeNode(node))
| Internal error on `__all__, = ...`
Steps to reproduce:
1. Install `pyflakes==2.4.0`
2. Create a file with the name of `foo.py`
3. Place the following in the file (the comma after `__all__` is intentional):
```py
__all__, = (
"fizz",
"buzz",
)
```
4. Run `pyflakes foo.py`
Outcome:
```
Traceback (most recent call last):
File "/tmp/foo/venv/bin/pyflakes", line 8, in <module>
sys.exit(main())
...
File "/tmp/foo/venv/lib/python3.9/site-packages/pyflakes/checker.py", line 591, in __init__
if isinstance(source.value, (ast.List, ast.Tuple)):
AttributeError: 'Tuple' object has no attribute 'value'
```
After a bit of investigation, [this method](https://github.com/PyCQA/pyflakes/blob/master/pyflakes/checker.py#L580) assumes that `source` is an `ast.Name`. But in reality it can be an `ast.Tuple` or some other, more complicated target.
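(A quick look with the stdlib `ast` module, purely illustrative, shows the shape involved: the assignment target is a `Tuple` wrapping the `Name`, which is why the fix in the patch above only treats `__all__` as an export when its parent is an `Assign`/`AugAssign`/`AnnAssign`.)
```python
import ast

assign = ast.parse('__all__, = ("fizz", "buzz")').body[0]
target = assign.targets[0]

print(type(assign).__name__)  # Assign
print(type(target).__name__)  # Tuple: the immediate parent of the __all__ Name node
print(target.elts[0].id)      # __all__
```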
I think the right behaviour is to mark this as invalid to the user.
I'd be happy to work on this issue myself. | PyCQA/pyflakes | diff --git a/pyflakes/test/test_imports.py b/pyflakes/test/test_imports.py
index d5be269..07504d9 100644
--- a/pyflakes/test/test_imports.py
+++ b/pyflakes/test/test_imports.py
@@ -1057,6 +1057,12 @@ class TestSpecialAll(TestCase):
__all__ = ["bar"]
''', m.UnusedImport)
+ def test_ignored_when_not_directly_assigned(self):
+ self.flakes('''
+ import bar
+ (__all__,) = ("foo",)
+ ''', m.UnusedImport)
+
def test_warningSuppressed(self):
"""
If a name is imported and unused but is named in C{__all__}, no warning
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 2.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/PyCQA/pyflakes.git@dbb18432165bdc567e015511b3f3284f0f14cf05#egg=pyflakes
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: pyflakes
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pyflakes
| [
"pyflakes/test/test_imports.py::TestSpecialAll::test_ignored_when_not_directly_assigned"
] | [] | [
"pyflakes/test/test_imports.py::TestImportationObject::test_import_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_basic",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_import_submodule_as_source_name",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_future",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_member",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_member_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_parent",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_with_module",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_relative_with_module_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_star",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_star_relative",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_submodule_member",
"pyflakes/test/test_imports.py::TestImportationObject::test_importfrom_submodule_member_as",
"pyflakes/test/test_imports.py::TestImportationObject::test_unusedImport_underscore",
"pyflakes/test/test_imports.py::Test::test_aliasedImport",
"pyflakes/test/test_imports.py::Test::test_aliasedImportShadowModule",
"pyflakes/test/test_imports.py::Test::test_assignRHSFirst",
"pyflakes/test/test_imports.py::Test::test_assignedToGlobal",
"pyflakes/test/test_imports.py::Test::test_differentSubmoduleImport",
"pyflakes/test/test_imports.py::Test::test_duplicateSubmoduleImport",
"pyflakes/test/test_imports.py::Test::test_functionNamesAreBoundNow",
"pyflakes/test/test_imports.py::Test::test_functionsRunLater",
"pyflakes/test/test_imports.py::Test::test_futureImport",
"pyflakes/test/test_imports.py::Test::test_futureImportFirst",
"pyflakes/test/test_imports.py::Test::test_futureImportStar",
"pyflakes/test/test_imports.py::Test::test_futureImportUndefined",
"pyflakes/test/test_imports.py::Test::test_futureImportUsed",
"pyflakes/test/test_imports.py::Test::test_ignoreNonImportRedefinitions",
"pyflakes/test/test_imports.py::Test::test_importInClass",
"pyflakes/test/test_imports.py::Test::test_importStar",
"pyflakes/test/test_imports.py::Test::test_importStar_relative",
"pyflakes/test/test_imports.py::Test::test_importUsedInMethodDefinition",
"pyflakes/test/test_imports.py::Test::test_importedInClass",
"pyflakes/test/test_imports.py::Test::test_localImportStar",
"pyflakes/test/test_imports.py::Test::test_methodsDontUseClassScope",
"pyflakes/test/test_imports.py::Test::test_nestedClassAndFunctionScope",
"pyflakes/test/test_imports.py::Test::test_nestedFunctionsNestScope",
"pyflakes/test/test_imports.py::Test::test_newAssignment",
"pyflakes/test/test_imports.py::Test::test_nonGlobalDoesNotRedefine",
"pyflakes/test/test_imports.py::Test::test_notUsedInNestedScope",
"pyflakes/test/test_imports.py::Test::test_packageImport",
"pyflakes/test/test_imports.py::Test::test_redefinedButUsedLater",
"pyflakes/test/test_imports.py::Test::test_redefinedByClass",
"pyflakes/test/test_imports.py::Test::test_redefinedByExcept",
"pyflakes/test/test_imports.py::Test::test_redefinedByFor",
"pyflakes/test/test_imports.py::Test::test_redefinedByFunction",
"pyflakes/test/test_imports.py::Test::test_redefinedBySubclass",
"pyflakes/test/test_imports.py::Test::test_redefinedIf",
"pyflakes/test/test_imports.py::Test::test_redefinedIfElse",
"pyflakes/test/test_imports.py::Test::test_redefinedInClass",
"pyflakes/test/test_imports.py::Test::test_redefinedInNestedFunction",
"pyflakes/test/test_imports.py::Test::test_redefinedInNestedFunctionTwice",
"pyflakes/test/test_imports.py::Test::test_redefinedTry",
"pyflakes/test/test_imports.py::Test::test_redefinedTryElse",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExcept",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptElse",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptElseFinally",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptFinally",
"pyflakes/test/test_imports.py::Test::test_redefinedTryExceptMulti",
"pyflakes/test/test_imports.py::Test::test_redefinedTryNested",
"pyflakes/test/test_imports.py::Test::test_redefinedWhileUnused",
"pyflakes/test/test_imports.py::Test::test_shadowedByFor",
"pyflakes/test/test_imports.py::Test::test_shadowedByForDeep",
"pyflakes/test/test_imports.py::Test::test_shadowedByLambda",
"pyflakes/test/test_imports.py::Test::test_shadowedByParameter",
"pyflakes/test/test_imports.py::Test::test_tryingMultipleImports",
"pyflakes/test/test_imports.py::Test::test_unusedImport",
"pyflakes/test/test_imports.py::Test::test_unusedImport_relative",
"pyflakes/test/test_imports.py::Test::test_unusedInNestedScope",
"pyflakes/test/test_imports.py::Test::test_unusedPackageImport",
"pyflakes/test/test_imports.py::Test::test_unused_package_with_submodule_import",
"pyflakes/test/test_imports.py::Test::test_usedAndGlobal",
"pyflakes/test/test_imports.py::Test::test_usedImport",
"pyflakes/test/test_imports.py::Test::test_usedImport_relative",
"pyflakes/test/test_imports.py::Test::test_usedInAssert",
"pyflakes/test/test_imports.py::Test::test_usedInAssignment",
"pyflakes/test/test_imports.py::Test::test_usedInAttributeAssign",
"pyflakes/test/test_imports.py::Test::test_usedInCall",
"pyflakes/test/test_imports.py::Test::test_usedInClass",
"pyflakes/test/test_imports.py::Test::test_usedInClassBase",
"pyflakes/test/test_imports.py::Test::test_usedInDict",
"pyflakes/test/test_imports.py::Test::test_usedInElifConditional",
"pyflakes/test/test_imports.py::Test::test_usedInElse",
"pyflakes/test/test_imports.py::Test::test_usedInExcept",
"pyflakes/test/test_imports.py::Test::test_usedInExec",
"pyflakes/test/test_imports.py::Test::test_usedInFor",
"pyflakes/test/test_imports.py::Test::test_usedInForElse",
"pyflakes/test/test_imports.py::Test::test_usedInFunction",
"pyflakes/test/test_imports.py::Test::test_usedInGetattr",
"pyflakes/test/test_imports.py::Test::test_usedInGlobal",
"pyflakes/test/test_imports.py::Test::test_usedInIfBody",
"pyflakes/test/test_imports.py::Test::test_usedInIfConditional",
"pyflakes/test/test_imports.py::Test::test_usedInKeywordArg",
"pyflakes/test/test_imports.py::Test::test_usedInLambda",
"pyflakes/test/test_imports.py::Test::test_usedInList",
"pyflakes/test/test_imports.py::Test::test_usedInListComp",
"pyflakes/test/test_imports.py::Test::test_usedInLogic",
"pyflakes/test/test_imports.py::Test::test_usedInOperators",
"pyflakes/test/test_imports.py::Test::test_usedInParameterDefault",
"pyflakes/test/test_imports.py::Test::test_usedInRaise",
"pyflakes/test/test_imports.py::Test::test_usedInReturn",
"pyflakes/test/test_imports.py::Test::test_usedInSlice",
"pyflakes/test/test_imports.py::Test::test_usedInSliceObj",
"pyflakes/test/test_imports.py::Test::test_usedInSubscript",
"pyflakes/test/test_imports.py::Test::test_usedInTry",
"pyflakes/test/test_imports.py::Test::test_usedInTryFinally",
"pyflakes/test/test_imports.py::Test::test_usedInTuple",
"pyflakes/test/test_imports.py::Test::test_usedInWhile",
"pyflakes/test/test_imports.py::Test::test_usedInYield",
"pyflakes/test/test_imports.py::Test::test_used_package_with_submodule_import",
"pyflakes/test/test_imports.py::Test::test_used_package_with_submodule_import_of_alias",
"pyflakes/test/test_imports.py::TestSpecialAll::test_all_mixed_attributes_and_strings",
"pyflakes/test/test_imports.py::TestSpecialAll::test_all_with_attributes",
"pyflakes/test/test_imports.py::TestSpecialAll::test_all_with_attributes_added",
"pyflakes/test/test_imports.py::TestSpecialAll::test_all_with_names",
"pyflakes/test/test_imports.py::TestSpecialAll::test_augmentedAssignment",
"pyflakes/test/test_imports.py::TestSpecialAll::test_ignoredInClass",
"pyflakes/test/test_imports.py::TestSpecialAll::test_ignoredInFunction",
"pyflakes/test/test_imports.py::TestSpecialAll::test_importStarExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_importStarNotExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_list_concatenation_assignment",
"pyflakes/test/test_imports.py::TestSpecialAll::test_redefinedByGenExp",
"pyflakes/test/test_imports.py::TestSpecialAll::test_tuple_concatenation_assignment",
"pyflakes/test/test_imports.py::TestSpecialAll::test_unboundExported",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedAsClassDecorator",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedAsDecorator",
"pyflakes/test/test_imports.py::TestSpecialAll::test_usedInGenExp",
"pyflakes/test/test_imports.py::TestSpecialAll::test_warningSuppressed"
] | [] | MIT License | 12,098 | 237 | [
"pyflakes/checker.py"
] |
|
asottile__babi-187 | e137233f4d79614f5b9d2cd5f6021a19a02fdde8 | 2022-01-29 18:36:54 | e137233f4d79614f5b9d2cd5f6021a19a02fdde8 | asottile: this still needs a test if you're looking for something to work on | diff --git a/babi/buf.py b/babi/buf.py
index 01b385f..0b03d45 100644
--- a/babi/buf.py
+++ b/babi/buf.py
@@ -179,11 +179,6 @@ class Buf:
def remove_ins_callback(self, cb: InsCallback) -> None:
self._ins_callbacks.remove(cb)
- def clear_callbacks(self) -> None:
- self._set_callbacks[:] = [self._set_cb]
- self._ins_callbacks[:] = [self._ins_cb]
- self._del_callbacks[:] = [self._del_cb]
-
@contextlib.contextmanager
def record(self) -> Generator[list[Modification], None, None]:
modifications: list[Modification] = []
diff --git a/babi/file.py b/babi/file.py
index 3312d1e..03664b1 100644
--- a/babi/file.py
+++ b/babi/file.py
@@ -286,7 +286,6 @@ class File:
*file_hls,
self._trailing_whitespace, self._replace_hl, self.selection,
)
- self.buf.clear_callbacks()
for file_hl in self._file_hls:
file_hl.register_callbacks(self.buf)
@@ -295,10 +294,10 @@ class File:
hl_factories: tuple[HLFactory, ...],
color_manager: ColorManager,
) -> None:
- self._trailing_whitespace = TrailingWhitespace(color_manager)
self._hl_factories = hl_factories
# only re-initialize the highlighters if we've loaded once
if self._file_hls:
+ self._trailing_whitespace = TrailingWhitespace(color_manager)
self._initialize_highlighters()
def __repr__(self) -> str:
diff --git a/babi/highlight.py b/babi/highlight.py
index cbd2f75..284ac53 100644
--- a/babi/highlight.py
+++ b/babi/highlight.py
@@ -10,6 +10,7 @@ from typing import Tuple
from typing import TypeVar
from identify.identify import tags_from_filename
+from identify.identify import tags_from_path
from babi._types import Protocol
from babi.fdict import FChainMap
@@ -721,7 +722,11 @@ class Grammars:
return self.compiler_for_scope('source.unknown')
def compiler_for_file(self, filename: str, first_line: str) -> Compiler:
- for tag in tags_from_filename(filename) - {'text'}:
+ try:
+ tags = tags_from_path(filename)
+ except ValueError:
+ tags = tags_from_filename(filename)
+ for tag in tags - {'text'}:
try:
# TODO: this doesn't always match even if we detect it
return self.compiler_for_scope(f'source.{tag}')
| utilize identify's shebang detection for grammar when possible
an example:
```python
#!/usr/bin/env python3
print('hello')
```
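(Sketch, not from the repository: the difference between the two identify helpers for a file like this. `tags_from_path` reads the shebang of an existing executable file but raises `ValueError` when the path does not exist, which is why the patch above falls back to `tags_from_filename`.)
```python
from identify.identify import tags_from_filename, tags_from_path

def tags_for(path):
    try:
        return tags_from_path(path)        # shebang-aware, needs the file on disk
    except ValueError:                      # path does not exist
        return tags_from_filename(path)     # name/extension only

print(tags_for('foo/install'))  # includes 'python' when the file is executable with a python3 shebang
```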
this file saved as `foo/install` gets highlighted incorrectly as shell, but we can utilize `tags_from_path` to identify this as `python` earlier on | asottile/babi | diff --git a/tests/features/retheme_test.py b/tests/features/retheme_test.py
index d0accea..404259c 100644
--- a/tests/features/retheme_test.py
+++ b/tests/features/retheme_test.py
@@ -15,10 +15,7 @@ THEME = json.dumps({
SYNTAX = json.dumps({
'scopeName': 'source.demo',
'fileTypes': ['demo'],
- 'patterns': [
- {'match': r'#.*$\n?', 'name': 'comment'},
- {'match': r'-.*$\n?', 'name': 'minus'},
- ],
+ 'patterns': [{'match': r'#.*$\n?', 'name': 'comment'}],
})
@@ -106,67 +103,3 @@ def test_retheme_command_multiple_files(run, xdg_config_home, tmpdir):
[(20, 40, 0)] * 13 + [(236, 40, 0)] * 27, # # hello world
]):
h.assert_screen_attr_equals(i, attr)
-
-
-def test_retheme_bug(run, xdg_config_home, tmpdir):
- # this tests a complicated theme reloading bug triggered by:
- # - simple theme with not many colors
- # - reloads into a more complicated theme
- # - and then trailing whitespace is introduced
-
- # at the time of the fix the bug was a leak holding onto the old
- # highlighters and color manager through callbacks
-
- def hot_modify_theme():
- theme_json = json.dumps({
- 'colors': {'background': '#00d700', 'foreground': '#303030'},
- 'tokenColors': [
- {'scope': 'comment', 'settings': {'foreground': '#c00'}},
- {'scope': 'minus', 'settings': {'foreground': '#00c'}},
- ],
- })
- xdg_config_home.join('babi/theme.json').write(theme_json)
-
- f = tmpdir.join('t.demo')
- f.write('# hello\n- world\n')
-
- c_rev = [(236, 40, curses.A_REVERSE)]
- c_base = [(236, 40, 0)]
- c_comment = [(160, 40, 0)]
- c_minus = [(20, 40, 0)]
- c_ws = [(-1, 1, 0)]
-
- with run(str(f), term='screen-256color', width=80) as h, and_exit(h):
- h.await_text('# hello\n- world\n')
-
- for i, attr in enumerate([
- c_rev * 80, # header
- c_comment * 7 + c_base * 73, # # hello
- c_base * 80, # - world
- ]):
- h.assert_screen_attr_equals(i, attr)
-
- h.run(hot_modify_theme)
-
- trigger_command_mode(h)
- h.press_and_enter(':retheme')
- h.await_text_missing(':retheme')
-
- for i, attr in enumerate([
- c_rev * 80, # header
- c_comment * 7 + c_base * 73, # # hello
- c_minus * 7 + c_base * 73, # - world
- ]):
- h.assert_screen_attr_equals(i, attr)
-
- # trigger trailing whitespace
- h.press_and_enter('hi ')
- h.await_text('hi')
-
- for i, attr in enumerate([
- c_rev * 80, # header
- c_base * 2 + c_ws * 1 + c_base * 77, # hi<space>
- c_comment * 7 + c_base * 73, # # hello
- c_minus * 7 + c_base * 73, # - world
- ]):
- h.assert_screen_attr_equals(i, attr)
diff --git a/tests/highlight_test.py b/tests/highlight_test.py
index db8d2b7..3a72e5a 100644
--- a/tests/highlight_test.py
+++ b/tests/highlight_test.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+import stat
+
import pytest
from babi.highlight import highlight_line
@@ -13,6 +15,17 @@ def test_grammar_matches_extension_only_name(make_grammars):
assert compiler.root_state.entries[0].scope[0] == 'shell'
+def test_file_without_extension(tmpdir, make_grammars):
+ f = tmpdir.join('f')
+ f.write('#!/usr/bin/env python3')
+ f.chmod(stat.S_IRWXU)
+
+ data = {'scopeName': 'source.python', 'patterns': []}
+ grammars = make_grammars(data)
+ compiler = grammars.compiler_for_file(str(f), f.read())
+ assert compiler.root_state.entries[0].scope[0] == 'source.python'
+
+
def test_grammar_matches_via_identify_tag(make_grammars):
grammars = make_grammars({'scopeName': 'source.ini', 'patterns': []})
compiler = grammars.compiler_for_file('setup.cfg', '')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 3
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y tmux gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/asottile/babi.git@e137233f4d79614f5b9d2cd5f6021a19a02fdde8#egg=babi
babi_grammars==0.0.62
cffi==1.17.1
covdefaults==2.3.0
coverage==7.8.0
exceptiongroup==1.2.2
hecate @ git+https://github.com/asottile/hecate@875567f2ca2a58220c4f1f70b0db9a79c018e2ae
identify==2.6.9
iniconfig==2.1.0
onigurumacffi==1.4.1
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
remote-pdb==2.1.0
tomli==2.2.1
| name: babi
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- babi-grammars==0.0.62
- cffi==1.17.1
- covdefaults==2.3.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- hecate==0.1.0
- identify==2.6.9
- iniconfig==2.1.0
- onigurumacffi==1.4.1
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- remote-pdb==2.1.0
- tomli==2.2.1
prefix: /opt/conda/envs/babi
| [
"tests/highlight_test.py::test_file_without_extension"
] | [
"tests/features/retheme_test.py::test_retheme_signal[tmux]",
"tests/features/retheme_test.py::test_retheme_command_multiple_files[tmux]"
] | [
"tests/features/retheme_test.py::test_retheme_signal[fake]",
"tests/features/retheme_test.py::test_retheme_command_multiple_files[fake]",
"tests/highlight_test.py::test_grammar_matches_extension_only_name",
"tests/highlight_test.py::test_grammar_matches_via_identify_tag",
"tests/highlight_test.py::test_backslash_a",
"tests/highlight_test.py::test_backslash_g_inline",
"tests/highlight_test.py::test_backslash_g_next_line",
"tests/highlight_test.py::test_end_before_other_match",
"tests/highlight_test.py::test_backslash_g_captures_nl",
"tests/highlight_test.py::test_backslash_g_captures_nl_next_line",
"tests/highlight_test.py::test_while_no_nl",
"tests/highlight_test.py::test_complex_captures",
"tests/highlight_test.py::test_captures_multiple_applied_to_same_capture",
"tests/highlight_test.py::test_captures_ignores_empty",
"tests/highlight_test.py::test_captures_ignores_invalid_out_of_bounds",
"tests/highlight_test.py::test_captures_begin_end",
"tests/highlight_test.py::test_captures_while_captures",
"tests/highlight_test.py::test_captures_implies_begin_end_captures",
"tests/highlight_test.py::test_captures_implies_begin_while_captures",
"tests/highlight_test.py::test_include_self",
"tests/highlight_test.py::test_include_repository_rule",
"tests/highlight_test.py::test_include_with_nested_repositories",
"tests/highlight_test.py::test_include_other_grammar",
"tests/highlight_test.py::test_include_base",
"tests/highlight_test.py::test_rule_with_begin_and_no_end",
"tests/highlight_test.py::test_begin_end_substitute_special_chars",
"tests/highlight_test.py::test_backslash_z",
"tests/highlight_test.py::test_buggy_begin_end_grammar"
] | [] | MIT License | 12,102 | 674 | [
"babi/buf.py",
"babi/file.py",
"babi/highlight.py"
] |
keras-team__keras-tuner-653 | be682573c6f6be1e3f3e6dcac786a34ccac19d3b | 2022-01-30 01:41:36 | d07a23e2f20c649ec8d1fd38040aa93685faf042 | codecov-commenter: # [Codecov](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team) Report
> Merging [#653](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team) (64933a2) into [master](https://codecov.io/gh/keras-team/keras-tuner/commit/be682573c6f6be1e3f3e6dcac786a34ccac19d3b?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team) (be68257) will **decrease** coverage by `0.03%`.
> The diff coverage is `100.00%`.
[](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team)
```diff
@@ Coverage Diff @@
## master #653 +/- ##
==========================================
- Coverage 92.42% 92.38% -0.04%
==========================================
Files 36 36
Lines 2876 2876
==========================================
- Hits 2658 2657 -1
- Misses 218 219 +1
```
| [Impacted Files](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team) | Coverage Δ | |
|---|---|---|
| [keras\_tuner/engine/oracle.py](https://codecov.io/gh/keras-team/keras-tuner/pull/653/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team#diff-a2VyYXNfdHVuZXIvZW5naW5lL29yYWNsZS5weQ==) | `95.04% <100.00%> (+0.02%)` | :arrow_up: |
| [keras\_tuner/engine/tuner\_utils.py](https://codecov.io/gh/keras-team/keras-tuner/pull/653/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team#diff-a2VyYXNfdHVuZXIvZW5naW5lL3R1bmVyX3V0aWxzLnB5) | `85.86% <100.00%> (-0.62%)` | :arrow_down: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team). Last update [be68257...64933a2](https://codecov.io/gh/keras-team/keras-tuner/pull/653?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=keras-team).
haifeng-jin: @brydon You will need to run `shell/format.sh` to format the code before I can merge it. Thanks.
brydon: Formatting fixed! | diff --git a/keras_tuner/engine/oracle.py b/keras_tuner/engine/oracle.py
index cbc558c..e568e14 100644
--- a/keras_tuner/engine/oracle.py
+++ b/keras_tuner/engine/oracle.py
@@ -183,7 +183,9 @@ class Oracle(stateful.Stateful):
if tuner_id in self.ongoing_trials:
return self.ongoing_trials[tuner_id]
- trial_id = trial_lib.generate_trial_id()
+ # Make the trial_id the current number of trial, pre-padded with 0s
+ trial_id = "{{:0{}d}}".format(len(str(self.max_trials)))
+ trial_id = trial_id.format(len(self.trials))
if self.max_trials and len(self.trials) >= self.max_trials:
status = trial_lib.TrialStatus.STOPPED
diff --git a/keras_tuner/engine/tuner_utils.py b/keras_tuner/engine/tuner_utils.py
index f87a0a8..1a3ced3 100644
--- a/keras_tuner/engine/tuner_utils.py
+++ b/keras_tuner/engine/tuner_utils.py
@@ -95,7 +95,6 @@ class Display(object):
def __init__(self, oracle, verbose=1):
self.verbose = verbose
self.oracle = oracle
- self.trial_number = len(self.oracle.trials)
self.col_width = 18
# Start time for the overall search
@@ -107,7 +106,7 @@ class Display(object):
def on_trial_begin(self, trial):
if self.verbose >= 1:
- self.trial_number += 1
+ self.trial_number = int(trial.trial_id) + 1
print()
print("Search: Running Trial #{}".format(self.trial_number))
print()
diff --git a/keras_tuner/tuners/bayesian.py b/keras_tuner/tuners/bayesian.py
index 96f91f2..e0ebd69 100644
--- a/keras_tuner/tuners/bayesian.py
+++ b/keras_tuner/tuners/bayesian.py
@@ -50,7 +50,7 @@ def matern_kernel(x, y=None):
# nu = 2.5
dists = cdist(x, y)
dists *= math.sqrt(5)
- kernel_matrix = (1.0 + dists + dists ** 2 / 3.0) * np.exp(-dists)
+ kernel_matrix = (1.0 + dists + dists**2 / 3.0) * np.exp(-dists)
return kernel_matrix
@@ -122,7 +122,7 @@ class GaussianProcessRegressor(object):
y_var[y_var < 0] = 0.0
# Undo normalize y.
- y_var *= self._y_train_std ** 2
+ y_var *= self._y_train_std**2
y_mean = self._y_train_std * y_mean + self._y_train_mean
return y_mean.flatten(), np.sqrt(y_var)
@@ -301,8 +301,9 @@ class BayesianOptimizationOracle(oracle_module.Oracle):
prob = hp_module.value_to_cumulative_prob(trial_value, hp)
vector.append(prob)
- if trial in ongoing_trials:
- # "Hallucinate" the results of ongoing trials. This ensures that
+ if trial in ongoing_trials and hasattr(self.gpr, "_x_train"):
+ # Check if self.gpr has had a .fit called at least once and then
+ # "hallucinate" the results of ongoing trials. This ensures that
# repeat trials are not selected when running distributed.
x_h = np.array(vector).reshape((1, -1))
y_h_mean, y_h_std = self.gpr.predict(x_h)
diff --git a/keras_tuner/tuners/sklearn_tuner.py b/keras_tuner/tuners/sklearn_tuner.py
index 8843f8a..9e1ec64 100644
--- a/keras_tuner/tuners/sklearn_tuner.py
+++ b/keras_tuner/tuners/sklearn_tuner.py
@@ -13,6 +13,7 @@
# limitations under the License.
"""Tuner for Scikit-learn Models."""
import collections
+import inspect
import os
import pickle
import warnings
@@ -166,7 +167,9 @@ class SklearnTuner(base_tuner.BaseTuner):
)
model = self.hypermodel.build(trial.hyperparameters)
- if isinstance(model, sklearn.pipeline.Pipeline):
+
+ supports_sw = "sample_weight" in inspect.getfullargspec(model.fit).args
+ if isinstance(model, sklearn.pipeline.Pipeline) or not supports_sw:
model.fit(X_train, y_train)
else:
model.fit(X_train, y_train, sample_weight=sample_weight_train)
| When running distributed, ongoing count of trial number is not synced between workers
When running keras_tuner distributed, the `Display` prints out a string like `Search: Running Trial #x`.
Initially, these numbers make sense. Meaning, if I run 5 workers I'll see `Search: Running Trial #1` in the first, `Search: Running Trial #2`, all the way to `Search: Running Trial #5` in the fifth. However, the second iteration of the first worker will read `Search: Running Trial #2` as well (despite the fact that the "second trial" was already run).
This is purely a cosmetic bug -- that is `max_trials` is always respected. However, there is no easy way to check the progress of the workers except by doing some kind of count of the `trial_xxxx` folders in the output directory.
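(For reference, a small sketch of the scheme used in the patch above, with illustrative numbers: the oracle derives the trial id from its own running trial count, zero-padded to the width of `max_trials`, so each worker's `Display` can print the same trial number.)
```python
max_trials = 6
trials_created = 3                      # len(self.trials) on the chief oracle
width = len(str(max_trials))            # 1 -> format spec "{:01d}"
trial_id = "{{:0{}d}}".format(width).format(trials_created)

print(trial_id)           # "3"
print(int(trial_id) + 1)  # 4 -> displayed as "Search: Running Trial #4"
```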
**To Reproduce**
The following MWE is composed of three files.
Here is `trial_id_mwe.py` - a simple example of using `keras_tuner` to maximize a Gaussian.
```
import keras_tuner as kt
import numpy as np
import time
class MWETuner(kt.Tuner):
def run_trial(self, trial):
hp = trial.hyperparameters
a = hp.Float('a', -2, 2)
time.sleep(10) # Simulate training loop
return {"score": np.exp(-a**2)}
if __name__ == "__main__":
oracle = kt.oracles.RandomSearchOracle(
objective=kt.Objective("score", "max"),
max_trials=6
)
tuner = MWETuner(
oracle=oracle,
directory="results",
project_name="mwe"
)
tuner.search()
```
I then launch my chief with this bash script `run_chief.sh`
```
export KERASTUNER_TUNER_ID="chief"
export KERASTUNER_ORACLE_IP="127.0.0.1"
export KERASTUNER_ORACLE_PORT="8000"
export TF_CPP_MIN_LOG_LEVEL="3"
export CUDA_DEVICE_ORDER=""
export CUDA_VISIBLE_DEVICES=""
python trial_id_mwe.py
```
and my two worker classes with this bash script `run_worker.sh`
```
export KERASTUNER_TUNER_ID="tuner$1"
export KERASTUNER_ORACLE_IP="127.0.0.1"
export KERASTUNER_ORACLE_PORT="8000"
export TF_CPP_MIN_LOG_LEVEL="3"
export CUDA_DEVICE_ORDER=""
export CUDA_VISIBLE_DEVICES=""
python trial_id_mwe.py
```
Where I launch `sh run_worker.sh 0` and `sh run_worker.sh 1` for the two tuners. Note: I disable GPU to make this example run easily on CPU, though this error persists when training over multiple GPUs as well.
**Expected behavior**
Would expect the number to sync between worker `Oracle`s.
**Would you like to help us fix it?**
Pull request submitted. | keras-team/keras-tuner | diff --git a/keras_tuner/engine/tuner_utils_test.py b/keras_tuner/engine/tuner_utils_test.py
index 17403a2..312ef8f 100644
--- a/keras_tuner/engine/tuner_utils_test.py
+++ b/keras_tuner/engine/tuner_utils_test.py
@@ -92,14 +92,11 @@ def test_convert_to_metrics_with_float():
def test_convert_to_metrics_with_dict():
- assert (
- tuner_utils.convert_to_metrics_dict(
- {"loss": 0.2, "val_loss": 0.1},
- obj_module.Objective("val_loss", "min"),
- "func_name",
- )
- == {"loss": 0.2, "val_loss": 0.1}
- )
+ assert tuner_utils.convert_to_metrics_dict(
+ {"loss": 0.2, "val_loss": 0.1},
+ obj_module.Objective("val_loss", "min"),
+ "func_name",
+ ) == {"loss": 0.2, "val_loss": 0.1}
def test_convert_to_metrics_with_list_of_floats():
diff --git a/keras_tuner/tuners/bayesian_test.py b/keras_tuner/tuners/bayesian_test.py
index 20063f5..81f3bc3 100644
--- a/keras_tuner/tuners/bayesian_test.py
+++ b/keras_tuner/tuners/bayesian_test.py
@@ -356,3 +356,62 @@ def test_distributed_optimization(tmp_path):
# For log-scale param, just check that the order of magnitude is correct.
log_best_c = math.log(best_hps["c"], 10)
assert log_best_c > -4 and log_best_c < -2
+
+
+def test_interleaved_distributed_optimization(tmp_path):
+ hps = hp_module.HyperParameters()
+ hps.Float("a", -1, 1)
+ hps.Float("b", -1, 1)
+ hps.Float("c", -1, 1)
+ hps.Float("d", -1, 1)
+
+ def evaluate(hp):
+ # Minimum at a=4, b=1, c=1e-3 with score=-1
+ return -1 * hp["a"] ** 3 + hp["b"] ** 3 + hp["c"] - abs(hp["d"])
+
+ oracle = bo_module.BayesianOptimizationOracle(
+ objective=kt.Objective("score", "min"),
+ hyperparameters=hps,
+ max_trials=60,
+ num_initial_points=2,
+ )
+ oracle._set_project_dir(tmp_path, "untitled")
+
+ # Run 4 trials on 2 tuners
+
+ # Start both tuners at the same time
+ trial_1 = oracle.create_trial("tuner_0")
+ trial_2 = oracle.create_trial("tuner_1")
+
+ # tuner_0 finishes trial_1 before tuner_1 finishes
+ oracle.update_trial(
+ trial_1.trial_id, {"score": evaluate(trial_1.hyperparameters)}
+ )
+ oracle.end_trial(trial_1.trial_id, "COMPLETED")
+
+ # tuner_0 request a new trial (trial_3)
+ trial_3 = oracle.create_trial("tuner_0")
+
+ # tuner_1 finishes trial_2
+ oracle.update_trial(
+ trial_2.trial_id, {"score": evaluate(trial_2.hyperparameters)}
+ )
+ oracle.end_trial(trial_2.trial_id, "COMPLETED")
+
+ # tuner_1 requests the final new trial (trial_4)
+ # the Bayesian optimizer will use ongoing trial_3 to hallucinate
+ trial_4 = oracle.create_trial("tuner_1")
+
+ # tuner_0 finishes trial_3
+ oracle.update_trial(
+ trial_3.trial_id, {"score": evaluate(trial_3.hyperparameters)}
+ )
+ oracle.end_trial(trial_3.trial_id, "COMPLETED")
+
+ # tuner_1 finishes trial_4
+ oracle.update_trial(
+ trial_4.trial_id, {"score": evaluate(trial_4.hyperparameters)}
+ )
+ oracle.end_trial(trial_4.trial_id, "COMPLETED")
+
+ assert True
diff --git a/keras_tuner/tuners/sklearn_tuner_test.py b/keras_tuner/tuners/sklearn_tuner_test.py
index cdfa375..89a6e2b 100644
--- a/keras_tuner/tuners/sklearn_tuner_test.py
+++ b/keras_tuner/tuners/sklearn_tuner_test.py
@@ -21,13 +21,14 @@ from sklearn import ensemble
from sklearn import linear_model
from sklearn import metrics
from sklearn import model_selection
+from sklearn import neighbors
from sklearn import pipeline
import keras_tuner as kt
def build_model(hp):
- model_type = hp.Choice("model_type", ["random_forest", "ridge"])
+ model_type = hp.Choice("model_type", ["random_forest", "ridge", "knn"])
if model_type == "random_forest":
with hp.conditional_scope("model_type", "random_forest"):
model = ensemble.RandomForestClassifier(
@@ -39,6 +40,15 @@ def build_model(hp):
model = linear_model.RidgeClassifier(
alpha=hp.Float("alpha", 1e-3, 1, sampling="log")
)
+ elif model_type == "knn":
+ with hp.conditional_scope("model_type", "knn"):
+ k = hp.Int("n_neighbors", 1, 30, default=5)
+ model = neighbors.KNeighborsClassifier(
+ n_neighbors=k,
+ weights=hp.Choice(
+ "weights", ["uniform", "distance"], default="uniform"
+ ),
+ )
else:
raise ValueError("Unrecognized model_type")
return model
@@ -48,7 +58,7 @@ def build_pipeline(hp):
n_components = hp.Choice("n_components", [2, 5, 10], default=5)
pca = decomposition.PCA(n_components=n_components)
- model_type = hp.Choice("model_type", ["random_forest", "ridge"])
+ model_type = hp.Choice("model_type", ["random_forest", "ridge", "knn"])
if model_type == "random_forest":
with hp.conditional_scope("model_type", "random_forest"):
model = ensemble.RandomForestClassifier(
@@ -60,6 +70,15 @@ def build_pipeline(hp):
model = linear_model.RidgeClassifier(
alpha=hp.Float("alpha", 1e-3, 1, sampling="log")
)
+ elif model_type == "knn":
+ with hp.conditional_scope("model_type", "knn"):
+ k = hp.Int("n_neighbors", 1, 30, default=5)
+ model = neighbors.KNeighborsClassifier(
+ n_neighbors=k,
+ weights=hp.Choice(
+ "weights", ["uniform", "distance"], default="uniform"
+ ),
+ )
else:
raise ValueError("Unrecognized model_type")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 4
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tests]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"tensorflow-cpu",
"future",
"tabulate",
"terminaltables",
"colorama",
"tqdm",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | absl-py==2.1.0
astunparse==1.6.3
backcall==0.2.0
black==23.3.0
cachetools==5.5.2
certifi @ file:///croot/certifi_1671487769961/work/certifi
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==7.2.7
decorator==5.1.1
exceptiongroup==1.2.2
execnet==2.0.2
flake8==5.0.4
flatbuffers==25.2.10
future==1.0.0
gast==0.4.0
google-auth==2.38.0
google-auth-oauthlib==0.4.6
google-pasta==0.2.0
grpcio==1.62.3
h5py==3.8.0
idna==3.10
importlib-metadata==4.2.0
iniconfig==2.0.0
ipython==7.34.0
isort==5.11.5
jedi==0.19.2
joblib==1.3.2
keras==2.11.0
-e git+https://github.com/keras-team/keras-tuner.git@be682573c6f6be1e3f3e6dcac786a34ccac19d3b#egg=keras_tuner
kt-legacy==1.0.5
libclang==18.1.1
Markdown==3.3.4
MarkupSafe==2.1.5
matplotlib-inline==0.1.6
mccabe==0.7.0
mypy-extensions==1.0.0
numpy==1.21.6
oauthlib==3.2.2
opt-einsum==3.3.0
packaging==24.0
pandas==1.3.5
parso==0.8.4
pathspec==0.11.2
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.0.0
pluggy==1.2.0
portpicker==1.6.0
prompt_toolkit==3.0.48
protobuf==3.19.6
psutil==7.0.0
ptyprocess==0.7.0
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycodestyle==2.9.1
pyflakes==2.5.0
Pygments==2.17.2
pytest==7.4.4
pytest-cov==4.1.0
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.31.0
requests-oauthlib==2.0.0
rsa==4.9
scikit-learn==1.0.2
scipy==1.7.3
six==1.17.0
tabulate==0.9.0
tensorboard==2.11.2
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorflow-cpu==2.11.0
tensorflow-estimator==2.11.0
tensorflow-io-gcs-filesystem==0.34.0
termcolor==2.3.0
terminaltables==3.1.10
threadpoolctl==3.1.0
tomli==2.0.1
tqdm==4.67.1
traitlets==5.9.0
typed-ast==1.5.5
typing_extensions==4.7.1
urllib3==2.0.7
wcwidth==0.2.13
Werkzeug==2.2.3
wrapt==1.16.0
zipp==3.15.0
| name: keras-tuner
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- absl-py==2.1.0
- astunparse==1.6.3
- backcall==0.2.0
- black==23.3.0
- cachetools==5.5.2
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==7.2.7
- decorator==5.1.1
- exceptiongroup==1.2.2
- execnet==2.0.2
- flake8==5.0.4
- flatbuffers==25.2.10
- future==1.0.0
- gast==0.4.0
- google-auth==2.38.0
- google-auth-oauthlib==0.4.6
- google-pasta==0.2.0
- grpcio==1.62.3
- h5py==3.8.0
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==2.0.0
- ipython==7.34.0
- isort==5.11.5
- jedi==0.19.2
- joblib==1.3.2
- keras==2.11.0
- kt-legacy==1.0.5
- libclang==18.1.1
- markdown==3.3.4
- markupsafe==2.1.5
- matplotlib-inline==0.1.6
- mccabe==0.7.0
- mypy-extensions==1.0.0
- numpy==1.21.6
- oauthlib==3.2.2
- opt-einsum==3.3.0
- packaging==24.0
- pandas==1.3.5
- parso==0.8.4
- pathspec==0.11.2
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.0.0
- pluggy==1.2.0
- portpicker==1.6.0
- prompt-toolkit==3.0.48
- protobuf==3.19.6
- psutil==7.0.0
- ptyprocess==0.7.0
- pyasn1==0.5.1
- pyasn1-modules==0.3.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pygments==2.17.2
- pytest==7.4.4
- pytest-cov==4.1.0
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.31.0
- requests-oauthlib==2.0.0
- rsa==4.9
- scikit-learn==1.0.2
- scipy==1.7.3
- six==1.17.0
- tabulate==0.9.0
- tensorboard==2.11.2
- tensorboard-data-server==0.6.1
- tensorboard-plugin-wit==1.8.1
- tensorflow-cpu==2.11.0
- tensorflow-estimator==2.11.0
- tensorflow-io-gcs-filesystem==0.34.0
- termcolor==2.3.0
- terminaltables==3.1.10
- threadpoolctl==3.1.0
- tomli==2.0.1
- tqdm==4.67.1
- traitlets==5.9.0
- typed-ast==1.5.5
- typing-extensions==4.7.1
- urllib3==2.0.7
- wcwidth==0.2.13
- werkzeug==2.2.3
- wrapt==1.16.0
- zipp==3.15.0
prefix: /opt/conda/envs/keras-tuner
| [
"keras_tuner/tuners/bayesian_test.py::test_interleaved_distributed_optimization",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_tuner_simple_with_np",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_tuner_with_df",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_custom_scoring_and_cv",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_additional_metrics",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_sample_weight",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_cv_with_groups",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_real_data"
] | [] | [
"keras_tuner/engine/tuner_utils_test.py::test_save_best_epoch_with_single_objective",
"keras_tuner/engine/tuner_utils_test.py::test_save_best_epoch_with_multi_objective",
"keras_tuner/engine/tuner_utils_test.py::test_convert_to_metrics_with_history",
"keras_tuner/engine/tuner_utils_test.py::test_convert_to_metrics_with_float",
"keras_tuner/engine/tuner_utils_test.py::test_convert_to_metrics_with_dict",
"keras_tuner/engine/tuner_utils_test.py::test_convert_to_metrics_with_list_of_floats",
"keras_tuner/engine/tuner_utils_test.py::test_convert_to_metrics_with_dict_without_obj_key",
"keras_tuner/tuners/bayesian_test.py::test_gpr_mse_is_small",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_oracle",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_oracle_with_zero_y",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_dynamic_space",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_save_reload",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_optimization_tuner",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_optimization_tuner_set_alpha_beta",
"keras_tuner/tuners/bayesian_test.py::test_save_before_result",
"keras_tuner/tuners/bayesian_test.py::test_bayesian_oracle_maximize",
"keras_tuner/tuners/bayesian_test.py::test_hyperparameters_added",
"keras_tuner/tuners/bayesian_test.py::test_step_respected",
"keras_tuner/tuners/bayesian_test.py::test_float_optimization",
"keras_tuner/tuners/bayesian_test.py::test_distributed_optimization",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_pipeline",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_not_install_error",
"keras_tuner/tuners/sklearn_tuner_test.py::test_sklearn_deprecation_warning"
] | [] | Apache License 2.0 | 12,106 | 1,162 | [
"keras_tuner/engine/oracle.py",
"keras_tuner/engine/tuner_utils.py",
"keras_tuner/tuners/bayesian.py",
"keras_tuner/tuners/sklearn_tuner.py"
] |
encode__starlette-1459 | d6269e2f26fd41aa7d08f72a896b45162df69115 | 2022-01-31 08:33:33 | e7c1858146ed2ae5082345823cb99bd69f7c67e0 | o-fedorov: > Alternative to #1452
>
> Would you mind adding this test?
>
> ```python
> def test_exception_on_mounted_apps(test_client_factory):
> sub_app = Starlette(routes=[Route("/", exc)])
> app.mount("/sub", sub_app)
>
> client = test_client_factory(app)
> with pytest.raises(Exception) as ctx:
> client.get("/sub/")
> assert str(ctx.value) == "Exc"
> ```
The test is added
Kludex: I don't think this works if the `Response` object from the middleware is ignored, as below:
```python
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import Response
class CustomHeaderMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
await call_next(request)
return Response("Do you see an exception now?")
middleware = [
Middleware(CustomHeaderMiddleware)
]
app = Starlette(routes=routes, middleware=middleware)
```
Can you confirm, using the same `StreamingResponse` endpoint you created in your test, with this middleware?
#1452 should be able to solve this.
o-fedorov: Sorry, both this approach and #1452 seem not to solve the issue if the `Response` object from the middleware is ignored.
Kludex: More context about this PR can be found on #1452 and #1433.
The issue here is that we check if the endpoint (or app) raised an exception only if `http.response.start` is not sent, but `StreamingResponse` (and mounted apps) pass that conditional.
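(To make that concrete, a sketch with illustrative names of an endpoint that does send `http.response.start` and only fails while the body is being streamed:)
```python
from starlette.responses import StreamingResponse

def exc_stream(request):
    def faulty_stream():
        yield b"Ok"
        raise Exception("Faulty Stream")  # raised after the response has already started
    return StreamingResponse(faulty_stream())
```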
Kludex: Thanks for the PR @o-fedorov ! :) | diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py
index 423f407..bfb4a54 100644
--- a/starlette/middleware/base.py
+++ b/starlette/middleware/base.py
@@ -52,6 +52,9 @@ class BaseHTTPMiddleware:
assert message["type"] == "http.response.body"
yield message.get("body", b"")
+ if app_exc is not None:
+ raise app_exc
+
response = StreamingResponse(
status_code=message["status"], content=body_stream()
)
| Raising Exceptions in sub-applications routes
### Checklist
- [X] The bug is reproducible against the latest release or `master`.
- [X] There are no similar issues or pull requests to fix it yet.
### Describe the bug
Let's start with this PR: #1262
It's about preventing `anyio.ExceptionGroup` from being raised in views under a `BaseHTTPMiddleware`. That PR resolves the problem with a nonlocal variable that stores our exception. But in the case of sub-applications, it does not work.
As far as I can see (fyi, I am not good at asyncio), in the case below we reach and read a response before the exception is raised and stored in our nonlocal variable:
fragment of `BaseHTTPMiddleware.__call__`
```python
async def call_next(request: Request) -> Response:
app_exc: typing.Optional[Exception] = None
send_stream, recv_stream = anyio.create_memory_object_stream()
async def coro() -> None:
nonlocal app_exc
async with send_stream:
try:
task = await self.app(scope, request.receive, send_stream.send)
except Exception as exc:
app_exc = exc
task_group.start_soon(coro)
try:
message = await recv_stream.receive()
except anyio.EndOfStream:
if app_exc is not None:
raise app_exc
raise RuntimeError("No response returned.")
...
response = StreamingResponse(
status_code=message["status"], content=body_stream()
)
response.raw_headers = message["headers"]
return response
```
At this moment (`except anyio.EndOfStream:`) the exception has still not been raised.
### Steps to reproduce the bug
```python
import httpx
import pytest
from fastapi import FastAPI, APIRouter
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
class SomeError(Exception):
pass
class SomeMiddleware(BaseHTTPMiddleware):
async def dispatch(
self, request: Request, call_next: RequestResponseEndpoint
) -> Response:
return await call_next(request)
# Drop the middleware (or use one not based on BaseHTTPMiddleware) and the test works fine
app = FastAPI(middleware=[Middleware(SomeMiddleware), ])
async def simple_route(request: Request):
raise SomeError
another_router = APIRouter(
routes=[Route('/simple-route/', simple_route, methods=['GET'])]
)
sub_app = FastAPI()
sub_app.include_router(another_router)
app.router.mount(f'/api', sub_app)
@pytest.mark.asyncio
async def test_simple_route():
async with httpx.AsyncClient(app=app) as client:
with pytest.raises(SomeError):
await client.get("http://testserver/api/simple-route/")
```
### Expected behavior
The exception is raised and caught by `pytest.raises`
### Actual behavior
An exception wasn't raised
### Debugging material
_No response_
### Environment
macOS Monterey 12.0.1, starlette 0.17.1, Python 3.9.9
### Additional context
_No response_ | encode/starlette | diff --git a/tests/middleware/test_base.py b/tests/middleware/test_base.py
index c6bfd49..32468dc 100644
--- a/tests/middleware/test_base.py
+++ b/tests/middleware/test_base.py
@@ -3,7 +3,7 @@ import pytest
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
-from starlette.responses import PlainTextResponse
+from starlette.responses import PlainTextResponse, StreamingResponse
from starlette.routing import Route
@@ -28,6 +28,16 @@ def exc(request):
raise Exception("Exc")
[email protected]("/exc-stream")
+def exc_stream(request):
+ return StreamingResponse(_generate_faulty_stream())
+
+
+def _generate_faulty_stream():
+ yield b"Ok"
+ raise Exception("Faulty Stream")
+
+
@app.route("/no-response")
class NoResponse:
def __init__(self, scope, receive, send):
@@ -56,6 +66,10 @@ def test_custom_middleware(test_client_factory):
response = client.get("/exc")
assert str(ctx.value) == "Exc"
+ with pytest.raises(Exception) as ctx:
+ response = client.get("/exc-stream")
+ assert str(ctx.value) == "Faulty Stream"
+
with pytest.raises(RuntimeError):
response = client.get("/no-response")
@@ -158,3 +172,13 @@ def test_fully_evaluated_response(test_client_factory):
client = test_client_factory(app)
response = client.get("/does_not_exist")
assert response.text == "Custom"
+
+
+def test_exception_on_mounted_apps(test_client_factory):
+ sub_app = Starlette(routes=[Route("/", exc)])
+ app.mount("/sub", sub_app)
+
+ client = test_client_factory(app)
+ with pytest.raises(Exception) as ctx:
+ client.get("/sub/")
+ assert str(ctx.value) == "Exc"
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.18 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[full]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiosqlite==0.19.0
anyio==3.7.1
async-generator==1.10
attrs==24.2.0
autoflake==1.4
black==21.12b0
bleach==6.0.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==6.2
cryptography==44.0.2
databases==0.5.3
docutils==0.20.1
exceptiongroup==1.2.2
execnet==2.0.2
flake8==4.0.1
ghp-import==2.1.0
greenlet==3.1.1
idna==3.10
importlib-metadata==4.2.0
iniconfig==2.0.0
isort==5.10.1
itsdangerous==2.1.2
jaraco.classes==3.2.3
jeepney==0.9.0
Jinja2==3.1.6
keyring==23.9.3
Markdown==3.3.4
MarkupSafe==2.1.5
mccabe==0.6.1
mergedeep==1.3.4
mkautodoc==0.1.0
mkdocs==1.2.3
mkdocs-material==8.1.3
mkdocs-material-extensions==1.2
more-itertools==9.1.0
mypy==0.931
mypy-extensions==1.0.0
outcome==1.3.0.post0
packaging==24.0
pathspec==0.11.2
pkginfo==1.10.0
platformdirs==4.0.0
pluggy==1.2.0
py==1.11.0
pycodestyle==2.8.0
pycparser==2.21
pyflakes==2.4.0
Pygments==2.17.2
pymdown-extensions==10.2.1
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
python-multipart==0.0.8
PyYAML==6.0.1
pyyaml_env_tag==0.1
readme-renderer==37.3
requests==2.31.0
requests-toolbelt==1.0.0
rfc3986==2.0.0
SecretStorage==3.3.3
six==1.17.0
sniffio==1.3.1
sortedcontainers==2.4.0
SQLAlchemy==1.4.54
-e git+https://github.com/encode/starlette.git@d6269e2f26fd41aa7d08f72a896b45162df69115#egg=starlette
toml==0.10.2
tomli==1.2.3
tqdm==4.67.1
trio==0.19.0
twine==3.7.1
typed-ast==1.5.5
types-contextvars==2.4.0
types-dataclasses==0.6.2
types-PyYAML==6.0.1
types-requests==2.26.3
typing_extensions==4.7.1
urllib3==2.0.7
watchdog==3.0.0
webencodings==0.5.1
zipp==3.15.0
| name: starlette
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiosqlite==0.19.0
- anyio==3.7.1
- async-generator==1.10
- attrs==24.2.0
- autoflake==1.4
- black==21.12b0
- bleach==6.0.0
- cffi==1.15.1
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==6.2
- cryptography==44.0.2
- databases==0.5.3
- docutils==0.20.1
- exceptiongroup==1.2.2
- execnet==2.0.2
- flake8==4.0.1
- ghp-import==2.1.0
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==2.0.0
- isort==5.10.1
- itsdangerous==2.1.2
- jaraco-classes==3.2.3
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==23.9.3
- markdown==3.3.4
- markupsafe==2.1.5
- mccabe==0.6.1
- mergedeep==1.3.4
- mkautodoc==0.1.0
- mkdocs==1.2.3
- mkdocs-material==8.1.3
- mkdocs-material-extensions==1.2
- more-itertools==9.1.0
- mypy==0.931
- mypy-extensions==1.0.0
- outcome==1.3.0.post0
- packaging==24.0
- pathspec==0.11.2
- pkginfo==1.10.0
- platformdirs==4.0.0
- pluggy==1.2.0
- py==1.11.0
- pycodestyle==2.8.0
- pycparser==2.21
- pyflakes==2.4.0
- pygments==2.17.2
- pymdown-extensions==10.2.1
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- python-multipart==0.0.8
- pyyaml==6.0.1
- pyyaml-env-tag==0.1
- readme-renderer==37.3
- requests==2.31.0
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- secretstorage==3.3.3
- six==1.17.0
- sniffio==1.3.1
- sortedcontainers==2.4.0
- sqlalchemy==1.4.54
- toml==0.10.2
- tomli==1.2.3
- tqdm==4.67.1
- trio==0.19.0
- twine==3.7.1
- typed-ast==1.5.5
- types-contextvars==2.4.0
- types-dataclasses==0.6.2
- types-pyyaml==6.0.1
- types-requests==2.26.3
- typing-extensions==4.7.1
- urllib3==2.0.7
- watchdog==3.0.0
- webencodings==0.5.1
- wheel==0.37.1
- zipp==3.15.0
prefix: /opt/conda/envs/starlette
| [
"tests/middleware/test_base.py::test_custom_middleware[asyncio]",
"tests/middleware/test_base.py::test_exception_on_mounted_apps[asyncio]"
] | [
"tests/middleware/test_base.py::test_custom_middleware[trio]",
"tests/middleware/test_base.py::test_middleware_decorator[trio]",
"tests/middleware/test_base.py::test_state_data_across_multiple_middlewares[trio]",
"tests/middleware/test_base.py::test_app_middleware_argument[trio]",
"tests/middleware/test_base.py::test_fully_evaluated_response[trio]",
"tests/middleware/test_base.py::test_exception_on_mounted_apps[trio]"
] | [
"tests/middleware/test_base.py::test_middleware_decorator[asyncio]",
"tests/middleware/test_base.py::test_state_data_across_multiple_middlewares[asyncio]",
"tests/middleware/test_base.py::test_app_middleware_argument[asyncio]",
"tests/middleware/test_base.py::test_middleware_repr",
"tests/middleware/test_base.py::test_fully_evaluated_response[asyncio]"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,117 | 136 | [
"starlette/middleware/base.py"
] |
pydap__pydap-250 | 5590d2503dae2ca630d511f558abd70dbe21100a | 2022-01-31 12:51:45 | eb8ee96bdf150642bf2e0603f406d2053af02424 | diff --git a/src/pydap/parsers/das.py b/src/pydap/parsers/das.py
index 1618e89..72820d2 100644
--- a/src/pydap/parsers/das.py
+++ b/src/pydap/parsers/das.py
@@ -81,6 +81,10 @@ class DASParser(SimpleParser):
value = str(value).strip('"')
elif value.lower() in ['nan', 'nan.', '-nan']:
value = float('nan')
+ elif value.lower() in ['inf', 'inf.']:
+ value = float('inf')
+ elif value.lower() in ['-inf', '-inf.']:
+ value = float('-inf')
else:
value = ast.literal_eval(value)
| enable infinity for float attributes
enable infinity for float attributes | pydap/pydap | diff --git a/src/pydap/tests/test_parsers_das.py b/src/pydap/tests/test_parsers_das.py
index 69aedee..b597670 100644
--- a/src/pydap/tests/test_parsers_das.py
+++ b/src/pydap/tests/test_parsers_das.py
@@ -26,6 +26,12 @@ DAS = """Attributes {
Float32 COADSX 1e20;
String COADSY "zero";
}
+ floats {
+ Float64 a nan;
+ Float64 b -inf;
+ Float64 c inf;
+ Float64 d 17;
+ }
}"""
# It is important to add attributes that have the same
@@ -69,3 +75,10 @@ class TestParseDAS(unittest.TestCase):
self.assertEqual(self.dataset.SPEH.attributes['TIME'], 0)
self.assertEqual(self.dataset.SPEH.attributes['COADSX'], 1e20)
self.assertEqual(self.dataset.SPEH.attributes['COADSY'], "zero")
+
+ def test_float_attributes(self):
+ """Test various values of float attributes."""
+ self.assertTrue(np.isnan(self.dataset.floats["a"]))
+ self.assertEqual(self.dataset.floats["b"], float("-inf"))
+ self.assertEqual(self.dataset.floats["c"], float("inf"))
+ self.assertEqual(self.dataset.floats["d"], 17.)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[server,handlers.netcdf,testing]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-mock",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libhdf5-serial-dev netcdf-bin libnetcdf-dev"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///croot/attrs_1668696182826/work
beautifulsoup4==4.13.3
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
cftime==1.6.2
charset-normalizer==3.4.1
coards==1.0.5
coverage==7.2.7
cryptography==44.0.2
docopt==0.6.2
flake8==5.0.4
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
gsw==3.0.6
gunicorn==23.0.0
idna==3.10
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
lxml==5.3.1
MarkupSafe==2.1.5
mccabe==0.7.0
netCDF4==1.6.5
numpy==1.21.6
ordereddict==1.1
packaging @ file:///croot/packaging_1671697413597/work
PasteDeploy==3.1.0
pluggy @ file:///tmp/build/80754af9/pluggy_1648042572264/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pycparser==2.21
-e git+https://github.com/pydap/pydap.git@5590d2503dae2ca630d511f558abd70dbe21100a#egg=pydap
pyflakes==2.5.0
pyOpenSSL==25.0.0
pytest==7.1.2
pytest-attrib==0.1.3
pytest-cov==4.1.0
pytest-mock==3.11.1
requests==2.31.0
requests-mock==1.12.1
six==1.17.0
soupsieve==2.4.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions @ file:///croot/typing_extensions_1669924550328/work
urllib3==2.0.7
waitress==2.1.2
WebOb==1.8.9
WebTest==3.0.1
Werkzeug==2.2.3
zipp @ file:///croot/zipp_1672387121353/work
| name: pydap
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=22.0=py37h06a4308_0
- pip=22.3.1=py37h06a4308_0
- pluggy=1.0.0=py37h06a4308_1
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- typing_extensions=4.4.0=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- cffi==1.15.1
- cftime==1.6.2
- charset-normalizer==3.4.1
- coards==1.0.5
- coverage==7.2.7
- cryptography==44.0.2
- docopt==0.6.2
- flake8==5.0.4
- gsw==3.0.6
- gunicorn==23.0.0
- idna==3.10
- importlib-metadata==4.2.0
- jinja2==3.1.6
- lxml==5.3.1
- markupsafe==2.1.5
- mccabe==0.7.0
- netcdf4==1.6.5
- numpy==1.21.6
- ordereddict==1.1
- pastedeploy==3.1.0
- pycodestyle==2.9.1
- pycparser==2.21
- pyflakes==2.5.0
- pyopenssl==25.0.0
- pytest-attrib==0.1.3
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- requests==2.31.0
- requests-mock==1.12.1
- six==1.17.0
- soupsieve==2.4.1
- urllib3==2.0.7
- waitress==2.1.2
- webob==1.8.9
- webtest==3.0.1
- werkzeug==2.2.3
prefix: /opt/conda/envs/pydap
| [
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_SPEH_attributes",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_basic",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_dot_attribute",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_float_attributes",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_meta_attributes",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_multiple_values",
"src/pydap/tests/test_parsers_das.py::TestParseDAS::test_nan"
] | [] | [] | [] | MIT License | 12,118 | 178 | [
"src/pydap/parsers/das.py"
] |
|
Textualize__textual-246 | 3574a6da172c98a43f813033f39c610d5a3afd84 | 2022-01-31 13:04:35 | bc229b4ee65c329691ae0894a691360bc7e891fb | diff --git a/src/textual/renderables/__init__.py b/src/textual/renderables/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/textual/renderables/underline_bar.py b/src/textual/renderables/underline_bar.py
new file mode 100644
index 000000000..59c9e6bb4
--- /dev/null
+++ b/src/textual/renderables/underline_bar.py
@@ -0,0 +1,122 @@
+from __future__ import annotations
+
+from rich.console import ConsoleOptions, Console, RenderResult
+from rich.segment import Segment
+from rich.style import StyleType
+
+
+class UnderlineBar:
+ """Thin horizontal bar with a portion highlighted.
+
+ Args:
+ highlight_range (tuple[float, float]): The range to highlight. Defaults to ``(0, 0)`` (no highlight)
+ highlight_style (StyleType): The style of the highlighted range of the bar.
+ background_style (StyleType): The style of the non-highlighted range(s) of the bar.
+ width (int, optional): The width of the bar, or ``None`` to fill available width.
+ """
+
+ def __init__(
+ self,
+ highlight_range: tuple[float, float] = (0, 0),
+ highlight_style: StyleType = "magenta",
+ background_style: StyleType = "grey37",
+ width: int | None = None,
+ ) -> None:
+ self.highlight_range = highlight_range
+ self.highlight_style = highlight_style
+ self.background_style = background_style
+ self.width = width
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ highlight_style = console.get_style(self.highlight_style)
+ background_style = console.get_style(self.background_style)
+
+ half_bar_right = "╸"
+ half_bar_left = "╺"
+ bar = "━"
+
+ width = self.width or options.max_width
+ start, end = self.highlight_range
+
+ start = max(start, 0)
+ end = min(end, width)
+
+ if start == end == 0 or end < 0 or start > end:
+ yield Segment(bar * width, style=background_style)
+ return
+
+ # Round start and end to nearest half
+ start = round(start * 2) / 2
+ end = round(end * 2) / 2
+
+ # Check if we start/end on a number that rounds to a .5
+ half_start = start - int(start) > 0
+ half_end = end - int(end) > 0
+
+ # Initial non-highlighted portion of bar
+ yield Segment(bar * (int(start - 0.5)), style=background_style)
+ if not half_start and start > 0:
+ yield Segment(half_bar_right, style=background_style)
+
+ # The highlighted portion
+ bar_width = int(end) - int(start)
+ if half_start:
+ yield Segment(half_bar_left + bar * (bar_width - 1), style=highlight_style)
+ else:
+ yield Segment(bar * bar_width, style=highlight_style)
+ if half_end:
+ yield Segment(half_bar_right, style=highlight_style)
+
+ # The non-highlighted tail
+ if not half_end and end - width != 0:
+ yield Segment(half_bar_left, style=background_style)
+ yield Segment(bar * (int(width) - int(end) - 1), style=background_style)
+
+
+if __name__ == "__main__":
+ import random
+ from time import sleep
+ from rich.color import ANSI_COLOR_NAMES
+
+ console = Console()
+
+ def frange(start, end, step):
+ current = start
+ while current < end:
+ yield current
+ current += step
+
+ while current >= 0:
+ yield current
+ current -= step
+
+ step = 0.1
+ start_range = frange(0.5, 10.5, step)
+ end_range = frange(10, 20, step)
+ ranges = zip(start_range, end_range)
+
+ console.print(UnderlineBar(width=20), f" (.0, .0)")
+
+ for range in ranges:
+ color = random.choice(list(ANSI_COLOR_NAMES.keys()))
+ console.print(
+ UnderlineBar(
+ range,
+ highlight_style=color,
+ width=20,
+ ),
+ f" {range}",
+ )
+
+ from rich.live import Live
+
+ bar = UnderlineBar(width=80, highlight_range=(0, 4.5))
+ with Live(bar, refresh_per_second=60) as live:
+ while True:
+ bar.highlight_range = (
+ bar.highlight_range[0] + 0.1,
+ bar.highlight_range[1] + 0.1,
+ )
+ sleep(0.005)
| Implement a bar renderable for tabbed dialogs
Implement a renderable used to underline tabs in a tabbed dialog.
This renderable should use the same unicode characters as Rich progress bars. It should render a line with a portion in a different color extending from p1 to p2.
Also add the option to not render the highlight and just the background bar.
Suggested interface:
```python
class TabUnderline:
def __init__(self, highlight: tuple[float, float] | None = None, color1: Color, color2: Color):
...
``` | Textualize/textual | diff --git a/tests/renderables/__init__.py b/tests/renderables/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/renderables/test_underline_bar.py b/tests/renderables/test_underline_bar.py
new file mode 100644
index 000000000..5c5e4de9c
--- /dev/null
+++ b/tests/renderables/test_underline_bar.py
@@ -0,0 +1,126 @@
+from tests.utilities.render import render
+from textual.renderables.underline_bar import UnderlineBar
+
+MAGENTA = "\x1b[35m"
+GREY = "\x1b[38;5;59m"
+STOP = "\x1b[0m"
+GREEN = "\x1b[32m"
+RED = "\x1b[31m"
+
+
+def test_no_highlight():
+ bar = UnderlineBar(width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_highlight_from_zero():
+ bar = UnderlineBar(highlight_range=(0, 2.5), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}━━{STOP}{MAGENTA}╸{STOP}{GREY}━━━{STOP}"
+ )
+
+
+def test_highlight_from_zero_point_five():
+ bar = UnderlineBar(highlight_range=(0.5, 2), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}╺━{STOP}{GREY}╺{STOP}{GREY}━━━{STOP}"
+ )
+
+
+def test_highlight_middle():
+ bar = UnderlineBar(highlight_range=(2, 4), width=6)
+ assert render(bar) == (
+ f"{GREY}━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{GREY}╺{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_start():
+ bar = UnderlineBar(highlight_range=(2.5, 4), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{MAGENTA}╺━{STOP}"
+ f"{GREY}╺{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_end():
+ bar = UnderlineBar(highlight_range=(2, 4.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_half_start_and_half_end():
+ bar = UnderlineBar(highlight_range=(2.5, 4.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{MAGENTA}╺━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ f"{GREY}━{STOP}"
+ )
+
+
+def test_highlight_to_near_end():
+ bar = UnderlineBar(highlight_range=(3, 5.5), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}"
+ f"{GREY}╸{STOP}"
+ f"{MAGENTA}━━{STOP}"
+ f"{MAGENTA}╸{STOP}"
+ )
+
+
+def test_highlight_to_end():
+ bar = UnderlineBar(highlight_range=(3, 6), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}"
+ )
+
+
+def test_highlight_out_of_bounds_start():
+ bar = UnderlineBar(highlight_range=(-2, 3), width=6)
+ assert render(bar) == (
+ f"{MAGENTA}━━━{STOP}{GREY}╺{STOP}{GREY}━━{STOP}"
+ )
+
+
+def test_highlight_out_of_bounds_end():
+ bar = UnderlineBar(highlight_range=(3, 9), width=6)
+ assert render(bar) == (
+ f"{GREY}━━{STOP}{GREY}╸{STOP}{MAGENTA}━━━{STOP}"
+ )
+
+
+def test_highlight_full_range_out_of_bounds_end():
+ bar = UnderlineBar(highlight_range=(9, 10), width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_highlight_full_range_out_of_bounds_start():
+ bar = UnderlineBar(highlight_range=(-5, -2), width=6)
+ assert render(bar) == f"{GREY}━━━━━━{STOP}"
+
+
+def test_custom_styles():
+ bar = UnderlineBar(
+ highlight_range=(2, 4),
+ highlight_style="red",
+ background_style="green",
+ width=6
+ )
+ assert render(bar) == (
+ f"{GREEN}━{STOP}"
+ f"{GREEN}╸{STOP}"
+ f"{RED}━━{STOP}"
+ f"{GREEN}╺{STOP}"
+ f"{GREEN}━{STOP}"
+ )
diff --git a/tests/utilities/render.py b/tests/utilities/render.py
new file mode 100644
index 000000000..a2435c542
--- /dev/null
+++ b/tests/utilities/render.py
@@ -0,0 +1,24 @@
+import io
+import re
+
+from rich.console import Console, RenderableType
+
+
+re_link_ids = re.compile(r"id=[\d\.\-]*?;.*?\x1b")
+
+
+def replace_link_ids(render: str) -> str:
+ """Link IDs have a random ID and system path which is a problem for
+ reproducible tests.
+
+ """
+ return re_link_ids.sub("id=0;foo\x1b", render)
+
+
+def render(renderable: RenderableType, no_wrap: bool = False) -> str:
+ console = Console(
+ width=100, file=io.StringIO(), color_system="truecolor", legacy_windows=False
+ )
+ console.print(renderable, no_wrap=no_wrap)
+ output = replace_link_ids(console.file.getvalue())
+ return output
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 0
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "poetry install",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "poetry",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==21.2.0
backports.entry-points-selectable==1.1.0
black==21.12b0
Brotli @ file:///croot/brotli-split_1736182456865/work
build @ file:///croot/python-build_1679596508056/work
CacheControl @ file:///croot/cachecontrol-split_1676365553644/work
certifi @ file:///croot/certifi_1738623731865/work/certifi
cffi @ file:///croot/cffi_1736182485317/work
cfgv==3.3.1
charset-normalizer @ file:///croot/charset-normalizer_1721748349566/work
cleo @ file:///croot/cleo_1705431334181/work
click==8.0.1
colorama==0.4.4
commonmark==0.9.1
coverage==5.5
crashtest @ file:///croot/crashtest_1679422372509/work
cryptography @ file:///croot/cryptography_1740577825284/work
distlib==0.3.2
dulwich @ file:///croot/dulwich_1679420040193/work
filelock==3.0.12
ghp-import==2.0.1
html5lib @ file:///Users/ktietz/demo/mc3/conda-bld/html5lib_1629144453894/work
identify==2.2.13
idna @ file:///croot/idna_1714398848350/work
importlib-metadata==4.6.4
iniconfig==1.1.1
installer @ file:///croot/python-installer_1679432998036/work
jaraco.classes @ file:///tmp/build/80754af9/jaraco.classes_1620983179379/work
jeepney @ file:///tmp/build/80754af9/jeepney_1627537048313/work
Jinja2==3.0.1
jsonschema @ file:///croot/jsonschema_1728486696720/work
jsonschema-specifications @ file:///croot/jsonschema-specifications_1699032386549/work
keyring @ file:///croot/keyring_1678999217139/work
lockfile==0.12.2
Markdown==3.3.4
MarkupSafe==2.0.1
mergedeep==1.3.4
mkdocs==1.2.3
mkdocs-autorefs==0.2.1
mkdocs-material==7.2.5
mkdocs-material-extensions==1.0.1
mkdocstrings==0.15.2
more-itertools @ file:///croot/more-itertools_1727185441804/work
msgpack @ file:///opt/conda/conda-bld/msgpack-python_1652362659880/work
mypy==0.910
mypy-extensions==0.4.3
nodeenv==1.6.0
numpy @ file:///croot/numpy_and_numpy_base_1725470312869/work/dist/numpy-2.0.1-cp39-cp39-linux_x86_64.whl#sha256=b8c18bbfe185fbdff23024458e4b8ffbe2040e705abd5fb6cda1ef9d20b5974d
packaging==21.0
pathspec==0.9.0
pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
pkginfo @ file:///croot/pkginfo_1743184746806/work
platformdirs==2.2.0
pluggy==0.13.1
poetry @ file:///croot/poetry_1680193142998/work
poetry-core @ file:///croot/poetry-core_1680018645313/work
poetry-plugin-export @ file:///croot/poetry-plugin-export_1680122784541/work
pre-commit==2.14.0
ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py==1.10.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
Pygments==2.10.0
pymdown-extensions==8.2
pyOpenSSL @ file:///croot/pyopenssl_1741343803032/work
pyparsing==2.4.7
pyproject_hooks @ file:///croot/pyproject_hooks_1679584411881/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305812635/work
pytest==6.2.4
pytest-cov==2.12.1
python-dateutil==2.8.2
pytkdocs==0.11.1
PyYAML==5.4.1
pyyaml_env_tag==0.1
RapidFuzz @ file:///croot/rapidfuzz_1738592335633/work
referencing @ file:///croot/referencing_1699012038513/work
requests @ file:///croot/requests_1730999120400/work
requests-toolbelt @ file:///Users/ktietz/demo/mc3/conda-bld/requests-toolbelt_1629456163440/work
rich==10.16.1
rpds-py @ file:///croot/rpds-py_1736541261634/work
SecretStorage @ file:///croot/secretstorage_1678709481048/work
shellingham @ file:///croot/shellingham_1669142169426/work
six==1.16.0
-e git+https://github.com/Textualize/textual.git@3574a6da172c98a43f813033f39c610d5a3afd84#egg=textual
toml==0.10.2
tomli==1.2.3
tomlkit @ file:///croot/tomlkit_1728650307440/work
trove-classifiers @ file:///croot/trove-classifiers_1729277230900/work
typing-extensions==3.10.0.2
urllib3 @ file:///croot/urllib3_1718978550903/work
virtualenv==20.7.2
watchdog==2.1.5
webencodings==0.5.1
zipp==3.5.0
| name: textual
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- brotli-python=1.0.9=py39h6a678d5_9
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- cachecontrol=0.12.11=py39h06a4308_1
- certifi=2025.1.31=py39h06a4308_0
- cffi=1.17.1=py39h1fdaa30_1
- charset-normalizer=3.3.2=pyhd3eb1b0_0
- cleo=2.1.0=py39h06a4308_0
- crashtest=0.4.1=py39h06a4308_0
- cryptography=44.0.1=py39h7825ff9_0
- dbus=1.13.18=hb2f20db_0
- dulwich=0.21.3=py39h5eee18b_0
- expat=2.6.4=h6a678d5_0
- glib=2.78.4=h6a678d5_0
- glib-tools=2.78.4=h6a678d5_0
- html5lib=1.1=pyhd3eb1b0_0
- idna=3.7=py39h06a4308_0
- importlib_metadata=8.5.0=hd3eb1b0_0
- jaraco.classes=3.2.1=pyhd3eb1b0_0
- jeepney=0.7.1=pyhd3eb1b0_0
- jsonschema=4.23.0=py39h06a4308_0
- jsonschema-specifications=2023.7.1=py39h06a4308_0
- keyring=23.13.1=py39h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=11.2.0=h00389a5_1
- libgfortran5=11.2.0=h1234567_1
- libglib=2.78.4=hdc74915_0
- libgomp=11.2.0=h1234567_1
- libiconv=1.16=h5eee18b_3
- libopenblas=0.3.21=h043d6bf_0
- libstdcxx-ng=11.2.0=h1234567_1
- lockfile=0.12.2=py39h06a4308_0
- more-itertools=10.3.0=py39h06a4308_0
- msgpack-python=1.0.3=py39hd09550d_0
- ncurses=6.4=h6a678d5_0
- numpy=2.0.1=py39heeff2f4_1
- numpy-base=2.0.1=py39h8a23956_1
- openssl=3.0.16=h5eee18b_0
- pcre2=10.42=hebb0a14_1
- pexpect=4.8.0=pyhd3eb1b0_3
- pip=25.0=py39h06a4308_0
- pkginfo=1.12.0=py39h06a4308_0
- poetry=1.4.0=py39h06a4308_0
- poetry-core=1.5.1=py39h06a4308_0
- poetry-plugin-export=1.3.0=py39h4849bfd_0
- ptyprocess=0.7.0=pyhd3eb1b0_2
- pycparser=2.21=pyhd3eb1b0_0
- pyopenssl=25.0.0=py39h06a4308_0
- pyproject_hooks=1.0.0=py39h06a4308_0
- pysocks=1.7.1=py39h06a4308_0
- python=3.9.21=he870216_1
- python-build=0.10.0=py39h06a4308_0
- python-installer=0.6.0=py39h06a4308_0
- rapidfuzz=3.12.1=py39h6a678d5_0
- readline=8.2=h5eee18b_0
- referencing=0.30.2=py39h06a4308_0
- requests=2.32.3=py39h06a4308_1
- requests-toolbelt=0.9.1=pyhd3eb1b0_0
- rpds-py=0.22.3=py39h4aa5aa6_0
- secretstorage=3.3.1=py39h06a4308_1
- setuptools=75.8.0=py39h06a4308_0
- shellingham=1.5.0=py39h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomlkit=0.13.2=py39h06a4308_0
- trove-classifiers=2024.10.14=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- urllib3=1.26.19=py39h06a4308_0
- webencodings=0.5.1=py39h06a4308_1
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==21.2.0
- backports-entry-points-selectable==1.1.0
- black==21.12b0
- cfgv==3.3.1
- click==8.0.1
- colorama==0.4.4
- commonmark==0.9.1
- coverage==5.5
- distlib==0.3.2
- filelock==3.0.12
- ghp-import==2.0.1
- identify==2.2.13
- importlib-metadata==4.6.4
- iniconfig==1.1.1
- jinja2==3.0.1
- markdown==3.3.4
- markupsafe==2.0.1
- mergedeep==1.3.4
- mkdocs==1.2.3
- mkdocs-autorefs==0.2.1
- mkdocs-material==7.2.5
- mkdocs-material-extensions==1.0.1
- mkdocstrings==0.15.2
- mypy==0.910
- mypy-extensions==0.4.3
- nodeenv==1.6.0
- packaging==21.0
- pathspec==0.9.0
- platformdirs==2.2.0
- pluggy==0.13.1
- pre-commit==2.14.0
- py==1.10.0
- pygments==2.10.0
- pymdown-extensions==8.2
- pyparsing==2.4.7
- pytest==6.2.4
- pytest-cov==2.12.1
- python-dateutil==2.8.2
- pytkdocs==0.11.1
- pyyaml==5.4.1
- pyyaml-env-tag==0.1
- rich==10.16.1
- textual==0.1.13
- toml==0.10.2
- tomli==1.2.3
- typing-extensions==3.10.0.2
- virtualenv==20.7.2
- watchdog==2.1.5
- zipp==3.5.0
prefix: /opt/conda/envs/textual
| [
"tests/renderables/test_underline_bar.py::test_no_highlight",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero",
"tests/renderables/test_underline_bar.py::test_highlight_from_zero_point_five",
"tests/renderables/test_underline_bar.py::test_highlight_middle",
"tests/renderables/test_underline_bar.py::test_highlight_half_start",
"tests/renderables/test_underline_bar.py::test_highlight_half_end",
"tests/renderables/test_underline_bar.py::test_highlight_half_start_and_half_end",
"tests/renderables/test_underline_bar.py::test_highlight_to_near_end",
"tests/renderables/test_underline_bar.py::test_highlight_to_end",
"tests/renderables/test_underline_bar.py::test_highlight_out_of_bounds_start",
"tests/renderables/test_underline_bar.py::test_highlight_out_of_bounds_end",
"tests/renderables/test_underline_bar.py::test_highlight_full_range_out_of_bounds_end",
"tests/renderables/test_underline_bar.py::test_highlight_full_range_out_of_bounds_start",
"tests/renderables/test_underline_bar.py::test_custom_styles"
] | [] | [] | [] | MIT License | 12,119 | 1,192 | [] |
|
wyfo__apischema-342 | afa74786898e3fa92079596a2943c28176995293 | 2022-02-01 23:48:14 | 99e6b23504e8ca775fda9dfa93c4e350211c1b8a | codecov-commenter: # [Codecov](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez) Report
> Merging [#342](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez) (7c47d6d) into [master](https://codecov.io/gh/wyfo/apischema/commit/af1bf4ad1a04950ea3cdc7497b800cd268b0ea42?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez) (af1bf4a) will **decrease** coverage by `0.02%`.
> The diff coverage is `80.95%`.
[](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez)
```diff
@@ Coverage Diff @@
## master #342 +/- ##
==========================================
- Coverage 86.13% 86.11% -0.03%
==========================================
Files 67 67
Lines 5879 5898 +19
Branches 1218 1222 +4
==========================================
+ Hits 5064 5079 +15
- Misses 596 599 +3
- Partials 219 220 +1
```
| [Impacted Files](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez) | Coverage Δ | |
|---|---|---|
| [apischema/deserialization/methods.py](https://codecov.io/gh/wyfo/apischema/pull/342/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez#diff-YXBpc2NoZW1hL2Rlc2VyaWFsaXphdGlvbi9tZXRob2RzLnB5) | `84.51% <76.47%> (-0.21%)` | :arrow_down: |
| [apischema/deserialization/\_\_init\_\_.py](https://codecov.io/gh/wyfo/apischema/pull/342/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez#diff-YXBpc2NoZW1hL2Rlc2VyaWFsaXphdGlvbi9fX2luaXRfXy5weQ==) | `98.98% <100.00%> (+<0.01%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez). Last update [af1bf4a...7c47d6d](https://codecov.io/gh/wyfo/apischema/pull/342?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Joseph+Perez).
wyfo: @pchanial
Actually, I can merge this PR like this, and do the small refactoring myself if you're ok with that. I would like to release the patch tomorrow morning (I don't like having a bug more than 24h in the issues).
If you want to make the change yourself, I will wait, no worry.
pchanial: I had made the change before your last comment, so I just pushed it. I have started looking at ways to refactor this part but the tests fail with cython. I'll make another PR so we can discuss it. | diff --git a/apischema/deserialization/__init__.py b/apischema/deserialization/__init__.py
index eed26ac..240efaf 100644
--- a/apischema/deserialization/__init__.py
+++ b/apischema/deserialization/__init__.py
@@ -58,6 +58,7 @@ from apischema.deserialization.methods import (
FieldsConstructor,
FlattenedField,
FloatMethod,
+ FrozenSetMethod,
IntMethod,
ListCheckOnlyMethod,
ListMethod,
@@ -360,13 +361,19 @@ class DeserializationMethodVisitor(
value_method = value_factory.method
list_constraints = constraints_validators(constraints)[list]
method: DeserializationMethod
- if issubclass(cls, collections.abc.Set):
+ if issubclass(cls, collections.abc.Set) and not issubclass(cls, frozenset):
return SetMethod(list_constraints, value_method)
- elif self.no_copy and check_only(value_method):
+
+ if self.no_copy and check_only(value_method):
method = ListCheckOnlyMethod(list_constraints, value_method)
else:
method = ListMethod(list_constraints, value_method)
- return VariadicTupleMethod(method) if issubclass(cls, tuple) else method
+
+ if issubclass(cls, tuple):
+ return VariadicTupleMethod(method)
+ if issubclass(cls, frozenset):
+ return FrozenSetMethod(method)
+ return method
return self._factory(factory, list)
diff --git a/apischema/deserialization/methods.py b/apischema/deserialization/methods.py
index 5cc1e97..a1d4372 100644
--- a/apischema/deserialization/methods.py
+++ b/apischema/deserialization/methods.py
@@ -308,6 +308,14 @@ class SetMethod(DeserializationMethod):
return values
+@dataclass
+class FrozenSetMethod(DeserializationMethod):
+ method: DeserializationMethod
+
+ def deserialize(self, data: Any) -> Any:
+ return frozenset(self.method.deserialize(data))
+
+
@dataclass
class VariadicTupleMethod(DeserializationMethod):
method: DeserializationMethod
| Fix frozenset deserialization
AbstractSet and FrozenSet are not deserialized into frozensets.
```python
from typing import AbstractSet, FrozenSet
from apischema import deserialize
assert type(deserialize(AbstractSet[int], [1, 2, 3])) is set
assert type(deserialize(FrozenSet[int], [1, 2, 3])) is set
```
PR to follow | wyfo/apischema | diff --git a/tests/unit/test_deserialization_serialization.py b/tests/unit/test_deserialization_serialization.py
index 7d6d0de..a076691 100644
--- a/tests/unit/test_deserialization_serialization.py
+++ b/tests/unit/test_deserialization_serialization.py
@@ -4,6 +4,7 @@ from enum import Enum
from typing import (
AbstractSet,
Any,
+ FrozenSet,
List,
Mapping,
Optional,
@@ -30,6 +31,7 @@ uuid = str(uuid4())
def bijection(cls, data, expected):
obj = deserialize(cls, data)
assert obj == expected
+ assert type(obj) is type(expected)
assert serialize(cls, obj) == data
@@ -97,7 +99,8 @@ def test_primitive_error(data):
(List, [0, SimpleDataclass(0)]),
(Set, {0, SimpleDataclass(0)}),
(Sequence, [0, SimpleDataclass(0)]),
- (AbstractSet, frozenset([0, SimpleDataclass(0)])),
+ (AbstractSet, {0, SimpleDataclass(0)}),
+ (FrozenSet, frozenset([0, SimpleDataclass(0)])),
],
)
def test_collection(cls, expected):
@@ -188,7 +191,7 @@ def test_with_class_context():
class BigInt(int):
pass
- bijection(BigInt, 100, 100)
+ bijection(BigInt, 100, BigInt(100))
def test_properties():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[examples]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/wyfo/apischema.git@afa74786898e3fa92079596a2943c28176995293#egg=apischema
attrs==21.4.0
bson==0.5.10
coverage==7.8.0
Cython==0.29.27
docstring_parser==0.13
graphql-core==3.2.0
greenlet==3.1.1
iniconfig==2.1.0
orjson==3.10.16
packaging==24.2
pluggy==1.5.0
py==1.11.0
pydantic==1.9.0
pytest==6.2.5
pytest-asyncio==0.16.0
pytest-cov==3.0.0
python-dateutil==2.9.0.post0
six==1.17.0
SQLAlchemy==1.4.31
toml==0.10.2
tomli==2.2.1
typing_extensions==4.0.1
| name: apischema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==21.4.0
- bson==0.5.10
- coverage==7.8.0
- cython==0.29.27
- docstring-parser==0.13
- graphql-core==3.2.0
- greenlet==3.1.1
- iniconfig==2.1.0
- orjson==3.10.16
- packaging==24.2
- pluggy==1.5.0
- py==1.11.0
- pydantic==1.9.0
- pytest==6.2.5
- pytest-asyncio==0.16.0
- pytest-cov==3.0.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- sqlalchemy==1.4.31
- toml==0.10.2
- tomli==2.2.1
- typing-extensions==4.0.1
prefix: /opt/conda/envs/apischema
| [
"tests/unit/test_deserialization_serialization.py::test_collection[cls4-expected4]"
] | [] | [
"tests/unit/test_deserialization_serialization.py::test_any[]",
"tests/unit/test_deserialization_serialization.py::test_any[0]",
"tests/unit/test_deserialization_serialization.py::test_optional[None-None]",
"tests/unit/test_deserialization_serialization.py::test_optional[data1-expected1]",
"tests/unit/test_deserialization_serialization.py::test_optional_error",
"tests/unit/test_deserialization_serialization.py::test_union[-]",
"tests/unit/test_deserialization_serialization.py::test_union[data1-expected1]",
"tests/unit/test_deserialization_serialization.py::test_union_error[0]",
"tests/unit/test_deserialization_serialization.py::test_union_error[None]",
"tests/unit/test_deserialization_serialization.py::test_primitive[int-0]",
"tests/unit/test_deserialization_serialization.py::test_primitive[str-]",
"tests/unit/test_deserialization_serialization.py::test_primitive[bool-True]",
"tests/unit/test_deserialization_serialization.py::test_primitive[float-0.0]",
"tests/unit/test_deserialization_serialization.py::test_primitive_error[]",
"tests/unit/test_deserialization_serialization.py::test_primitive_error[None]",
"tests/unit/test_deserialization_serialization.py::test_collection[cls0-expected0]",
"tests/unit/test_deserialization_serialization.py::test_collection[cls1-expected1]",
"tests/unit/test_deserialization_serialization.py::test_collection[cls2-expected2]",
"tests/unit/test_deserialization_serialization.py::test_collection[cls3-expected3]",
"tests/unit/test_deserialization_serialization.py::test_collection_tuple",
"tests/unit/test_deserialization_serialization.py::test_collection_tuple_variadic",
"tests/unit/test_deserialization_serialization.py::test_iterable_error[data0]",
"tests/unit/test_deserialization_serialization.py::test_iterable_error[data1]",
"tests/unit/test_deserialization_serialization.py::test_mapping[str-data0-expected0]",
"tests/unit/test_deserialization_serialization.py::test_mapping[UUID-data1-expected1]",
"tests/unit/test_deserialization_serialization.py::test_mapping[UUID-data2-expected2]",
"tests/unit/test_deserialization_serialization.py::test_mapping_error[data0]",
"tests/unit/test_deserialization_serialization.py::test_mapping_error[data1]",
"tests/unit/test_deserialization_serialization.py::test_model[expected0]",
"tests/unit/test_deserialization_serialization.py::test_model[expected1]",
"tests/unit/test_deserialization_serialization.py::test_model_error[0]",
"tests/unit/test_deserialization_serialization.py::test_model_error[fake]",
"tests/unit/test_deserialization_serialization.py::test_enum",
"tests/unit/test_deserialization_serialization.py::test_enum_errors",
"tests/unit/test_deserialization_serialization.py::test_literal[0]",
"tests/unit/test_deserialization_serialization.py::test_literal[ok]",
"tests/unit/test_deserialization_serialization.py::test_literal_error",
"tests/unit/test_deserialization_serialization.py::test_dataclass[data0-expected0]",
"tests/unit/test_deserialization_serialization.py::test_dataclass[data1-expected1]",
"tests/unit/test_deserialization_serialization.py::test_dataclass[data2-expected2]",
"tests/unit/test_deserialization_serialization.py::test_dataclass_error[data0]",
"tests/unit/test_deserialization_serialization.py::test_dataclass_error[data1]",
"tests/unit/test_deserialization_serialization.py::test_with_class_context",
"tests/unit/test_deserialization_serialization.py::test_properties",
"tests/unit/test_deserialization_serialization.py::test_deque"
] | [] | MIT License | 12,131 | 506 | [
"apischema/deserialization/__init__.py",
"apischema/deserialization/methods.py"
] |
vyperlang__vyper-2627 | 4dea0cf25a136c0d4b8c452935b172a65e0890d8 | 2022-02-02 21:28:23 | f97f4ebb5fb5c9c492653e07bec511cf59b548c3 | diff --git a/vyper/codegen/expr.py b/vyper/codegen/expr.py
index 23ee166d..5e1e3674 100644
--- a/vyper/codegen/expr.py
+++ b/vyper/codegen/expr.py
@@ -326,7 +326,7 @@ class Expr:
is_constructor = self.expr.get_ancestor(vy_ast.FunctionDef).get("name") == "__init__"
if is_constructor:
# store memory position for later access in module.py in the variable record
- memory_loc = self.context.new_variable(f"#immutable_{self.expr.id}", var.typ)
+ memory_loc = self.context.new_variable(self.expr.id, var.typ)
self.context.global_ctx._globals[self.expr.id].pos = memory_loc
# store the data offset in the variable record as well for accessing
data_offset = self.expr._metadata["type"].position.offset
| Immutable variables used in the constructor
### Version Information
* vyper Version (output of `vyper --version`): 0.3.1
* OS: macOS
* Python Version (output of `python --version`): 3.8.9
### What's your issue about?
If you use immutable variables in the constructor, the variable fills with garbage (in this simplified example it remains 0).
```py
# @version 0.3.1
A: immutable(uint256)
a: public(uint256)
@external
def __init__(_a: uint256):
A = _a
self.a = _a # breaks with A
@external
@view
def a1() -> uint256:
return A
```
### How can it be fixed?
1) Usage of immutable variables in the constructor could be restricted by the compiler, or
2) the compiler could somehow set the right value (see the sketch after this list).
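A small, self-contained toy model of option 2, which is the direction the one-line compiler patch earlier in this record takes (it keys the constructor-time memory slot by the variable's own name instead of a mangled `#immutable_...` alias). Everything below is illustrative Python, not vyper internals.
```python
# Toy allocator: slots are keyed by name, so repeated lookups of the same
# name agree on one location.
class Context:
    def __init__(self):
        self.slots = {}
        self.next_free = 0

    def new_variable(self, name, size=32):
        if name not in self.slots:        # reuse the slot when the name repeats
            self.slots[name] = self.next_free
            self.next_free += size
        return self.slots[name]


ctx = Context()
write_slot = ctx.new_variable("A")        # slot touched when __init__ assigns A
read_slot = ctx.new_variable("A")         # slot touched when __init__ reads A
assert write_slot == read_slot            # same key, same slot

# If one side had used a mangled key such as "#immutable_A", the two slots
# would differ, and a mismatch like that would explain a garbage read of A
# inside the constructor.
assert ctx.new_variable("#immutable_A") != write_slot
```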
| vyperlang/vyper | diff --git a/tests/parser/features/test_immutable.py b/tests/parser/features/test_immutable.py
index 0162f80b..8d711844 100644
--- a/tests/parser/features/test_immutable.py
+++ b/tests/parser/features/test_immutable.py
@@ -32,6 +32,29 @@ def get_value() -> {typ}:
assert c.get_value() == value
[email protected]("val", [0, 1, 2 ** 256 - 1])
+def test_usage_in_constructor(get_contract, val):
+ code = """
+A: immutable(uint256)
+a: public(uint256)
+
+
+@external
+def __init__(_a: uint256):
+ A = _a
+ self.a = A
+
+
+@external
+@view
+def a1() -> uint256:
+ return A
+ """
+
+ c = get_contract(code, val)
+ assert c.a1() == c.a() == val
+
+
def test_multiple_immutable_values(get_contract):
code = """
a: immutable(uint256)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements-docs.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
alabaster==0.7.16
altgraph==0.17.4
asttokens==2.0.5
async-timeout==5.0.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
base58==2.1.1
bitarray==2.9.3
black==21.9b0
cached-property==1.5.2
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
commonmark==0.9.1
coverage==7.8.0
cryptography==44.0.2
cytoolz==0.12.3
decorator==5.2.1
distlib==0.3.9
docutils==0.16
eth-abi==2.2.0
eth-account==0.5.9
eth-bloom==1.0.4
eth-hash==0.3.3
eth-keyfile==0.5.1
eth-keys==0.3.4
eth-rlp==0.2.1
eth-tester==0.6.0b7
eth-typing==2.3.0
eth-utils==1.10.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
executing==2.2.0
filelock==3.18.0
flake8==3.9.2
flake8-bugbear==20.1.4
flake8-use-fstring==1.1
frozenlist==1.5.0
hexbytes==0.2.3
hypothesis==5.49.0
id==1.5.0
identify==2.6.9
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
ipfshttpclient==0.8.0a2
ipython==8.23.0
isort==5.9.3
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jedi==0.19.2
jeepney==0.9.0
Jinja2==3.1.6
jsonschema==3.2.0
keyring==25.6.0
lark-parser==0.10.0
lru-dict==1.3.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mccabe==0.6.1
mdurl==0.1.2
more-itertools==10.6.0
multiaddr==0.0.9
multidict==6.0.5
mypy==0.910
mypy_extensions==0.4.4
netaddr==1.3.0
nh3==0.2.21
nodeenv==1.9.1
packaging @ file:///croot/packaging_1734472117206/work
parsimonious==0.8.1
parso==0.8.4
pathspec==0.12.1
pexpect==4.9.0
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pre_commit==4.2.0
prompt_toolkit==3.0.50
propcache==0.3.1
protobuf==3.20.3
ptyprocess==0.7.0
pure_eval==0.2.3
py==1.11.0
py-ecc==5.2.0
py-evm==0.5.0a3
pycodestyle==2.7.0
pycparser==2.22
pycryptodome==3.22.0
pyethash==0.1.27
pyflakes==2.3.1
Pygments==2.19.1
pyinstaller==6.12.0
pyinstaller-hooks-contrib==2025.2
pyrsistent==0.20.0
pysha3==1.0.2
pytest==6.2.5
pytest-cov==2.12.1
pytest-forked==1.6.0
pytest-instafail==0.5.0
pytest-xdist==1.34.0
PyYAML==6.0.2
readme_renderer==43.0
recommonmark==0.7.1
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==13.9.0
rlp==2.0.1
SecretStorage==3.3.3
semantic-version==2.8.5
six==1.17.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
Sphinx==3.5.4
sphinx-rtd-theme==0.5.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
stack-data==0.5.1
toml==0.10.2
tomli==1.2.3
toolz==1.0.0
tox==3.25.1
traitlets==5.14.3
trie==2.0.0a5
twine==6.1.0
typing-extensions==3.10.0.2
urllib3==2.3.0
varint==1.0.2
virtualenv==20.29.3
-e git+https://github.com/vyperlang/vyper.git@4dea0cf25a136c0d4b8c452935b172a65e0890d8#egg=vyper
wcwidth==0.2.13
web3==5.27.0
websockets==9.1
yarl==1.18.3
zipp==3.21.0
| name: vyper
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py310h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py310h06a4308_0
- pip=25.0=py310h06a4308_0
- pluggy=1.5.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- alabaster==0.7.16
- altgraph==0.17.4
- asttokens==2.0.5
- async-timeout==5.0.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- base58==2.1.1
- bitarray==2.9.3
- black==21.9b0
- cached-property==1.5.2
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- commonmark==0.9.1
- coverage==7.8.0
- cryptography==44.0.2
- cytoolz==0.12.3
- decorator==5.2.1
- distlib==0.3.9
- docutils==0.16
- eth-abi==2.2.0
- eth-account==0.5.9
- eth-bloom==1.0.4
- eth-hash==0.3.3
- eth-keyfile==0.5.1
- eth-keys==0.3.4
- eth-rlp==0.2.1
- eth-tester==0.6.0b7
- eth-typing==2.3.0
- eth-utils==1.10.0
- execnet==2.1.1
- executing==2.2.0
- filelock==3.18.0
- flake8==3.9.2
- flake8-bugbear==20.1.4
- flake8-use-fstring==1.1
- frozenlist==1.5.0
- hexbytes==0.2.3
- hypothesis==5.49.0
- id==1.5.0
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- ipfshttpclient==0.8.0a2
- ipython==8.23.0
- isort==5.9.3
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jedi==0.19.2
- jeepney==0.9.0
- jinja2==3.1.6
- jsonschema==3.2.0
- keyring==25.6.0
- lark-parser==0.10.0
- lru-dict==1.3.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mccabe==0.6.1
- mdurl==0.1.2
- more-itertools==10.6.0
- multiaddr==0.0.9
- multidict==6.0.5
- mypy==0.910
- mypy-extensions==0.4.4
- netaddr==1.3.0
- nh3==0.2.21
- nodeenv==1.9.1
- parsimonious==0.8.1
- parso==0.8.4
- pathspec==0.12.1
- pexpect==4.9.0
- platformdirs==4.3.7
- pre-commit==4.2.0
- prompt-toolkit==3.0.50
- propcache==0.3.1
- protobuf==3.20.3
- ptyprocess==0.7.0
- pure-eval==0.2.3
- py==1.11.0
- py-ecc==5.2.0
- py-evm==0.5.0a3
- pycodestyle==2.7.0
- pycparser==2.22
- pycryptodome==3.22.0
- pyethash==0.1.27
- pyflakes==2.3.1
- pygments==2.19.1
- pyinstaller==6.12.0
- pyinstaller-hooks-contrib==2025.2
- pyrsistent==0.20.0
- pysha3==1.0.2
- pytest==6.2.5
- pytest-cov==2.12.1
- pytest-forked==1.6.0
- pytest-instafail==0.5.0
- pytest-xdist==1.34.0
- pyyaml==6.0.2
- readme-renderer==43.0
- recommonmark==0.7.1
- regex==2024.11.6
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==13.9.0
- rlp==2.0.1
- secretstorage==3.3.3
- semantic-version==2.8.5
- six==1.17.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sphinx==3.5.4
- sphinx-rtd-theme==0.5.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- stack-data==0.5.1
- toml==0.10.2
- tomli==1.2.3
- toolz==1.0.0
- tox==3.25.1
- traitlets==5.14.3
- trie==2.0.0a5
- twine==6.1.0
- typing-extensions==3.10.0.2
- urllib3==2.3.0
- varint==1.0.2
- virtualenv==20.29.3
- vyper==0.3.2.dev42+g4dea0cf2
- wcwidth==0.2.13
- web3==5.27.0
- websockets==9.1
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/vyper
| [
"tests/parser/features/test_immutable.py::test_usage_in_constructor[115792089237316195423570985008687907853269984665640564039457584007913129639935]",
"tests/parser/features/test_immutable.py::test_usage_in_constructor[1]"
] | [] | [
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[bytes32-deadbeefdeadbeefdeadbeefdeadbeef]",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[address-0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE]",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[int256--1606938044258990275541962092341162602522202993782792835301376]",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[int128--85070591730234615865843651857942052864]",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[bool-True]",
"tests/parser/features/test_immutable.py::test_list_immutable",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[Bytes[10]-Vyper",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[uint256-42]",
"tests/parser/features/test_immutable.py::test_multiple_immutable_values",
"tests/parser/features/test_immutable.py::test_usage_in_constructor[0]",
"tests/parser/features/test_immutable.py::test_struct_immutable",
"tests/parser/features/test_immutable.py::test_value_storage_retrieval[String[10]-Vyper"
] | [] | Apache License 2.0 | 12,141 | 209 | [
"vyper/codegen/expr.py"
] |
|
Sage-Bionetworks__Genie-437 | 0485b7e1599c4894714a92d5e12e99cc2b0cd02d | 2022-02-03 00:34:06 | 8c450ae488d55f3f76daf24b08ede575fb96a910 | diff --git a/genie/process_functions.py b/genie/process_functions.py
index ef0b745..3cd5e65 100644
--- a/genie/process_functions.py
+++ b/genie/process_functions.py
@@ -876,18 +876,21 @@ def check_col_and_values(
final.extend(value.split(sep))
check_values = pd.Series(final)
if not check_values.isin(possible_values).all():
- error = "{filename}: Please double check your {col} column. " "This column must only be these values: {possible_vals}\n".format(
- filename=filename,
- col=col,
- possible_vals=", ".join(
- [
- # This is done because of pandas typing.
- # An integer column with one NA/blank value
- # will be cast as a double.
- str(value).replace(".0", "")
- for value in possible_values
- ]
- ),
+ error = (
+ "{filename}: Please double check your {col} column. "
+ "This column must only be these values: {possible_vals}\n".format(
+ filename=filename,
+ col=col,
+ possible_vals=", ".join(
+ [
+ # This is done because of pandas typing.
+ # An integer column with one NA/blank value
+ # will be cast as a double.
+ str(value).replace(".0", "")
+ for value in possible_values
+ ]
+ ),
+ )
)
return (warning, error)
diff --git a/genie_registry/assay.py b/genie_registry/assay.py
index 5cf70ad..eba1ec7 100644
--- a/genie_registry/assay.py
+++ b/genie_registry/assay.py
@@ -141,7 +141,11 @@ class Assayinfo(FileTypeFormat):
warning = ""
if process_functions.checkColExist(assay_info_df, "SEQ_ASSAY_ID"):
- all_seq_assays = assay_info_df.SEQ_ASSAY_ID.unique()
+ all_seq_assays = (
+ assay_info_df.SEQ_ASSAY_ID.replace({"_": "-"}, regex=True)
+ .str.upper()
+ .unique()
+ )
if not all([assay.startswith(self.center) for assay in all_seq_assays]):
total_error += (
"Assay_information.yaml: Please make sure all your "
@@ -160,7 +164,12 @@ class Assayinfo(FileTypeFormat):
)
# These are all the SEQ_ASSAY_IDs that are in the clinical database
# but not in the assay_information file
- missing_seqs = uniq_seq_df["seq"][~uniq_seq_df["seq"].isin(all_seq_assays)]
+ missing_seqs = uniq_seq_df["seq"][
+ ~uniq_seq_df["seq"]
+ .replace({"_": "-"}, regex=True)
+ .str.upper()
+ .isin(all_seq_assays)
+ ]
missing_seqs_str = ", ".join(missing_seqs)
if missing_seqs.to_list():
total_error += (
| Make validation of SEQ_ASSAY_IDs case-insensitive
The SEQ_ASSAY_IDs in the assay_information.yaml are being compared to the 'Sample Clinical Database' table (syn7517674), which converts the SEQ_ASSAY_IDs to all uppercase. If the SEQ_ASSAY_IDs in the assay_information.yaml contain lowercase characters, they will be erroneously flagged. | Sage-Bionetworks/Genie | diff --git a/tests/test_assay.py b/tests/test_assay.py
index 4572808..b6054e7 100644
--- a/tests/test_assay.py
+++ b/tests/test_assay.py
@@ -85,6 +85,78 @@ def test_validinput__validate():
patch_get_gdc.assert_called()
+def test_case__validate():
+ """Valid input should have no errors or warnings"""
+ assay_info_dict = {
+ 'SEQ_ASSAY_ID': ['sage-1', 'SAGE-3'],
+ 'is_paired_end': [True, False],
+ 'library_strategy': ['value1', 'value2'],
+ 'library_selection': ['value1', 'value2'],
+ 'platform': ['value1', 'value2'],
+ 'instrument_model': ['value1', 'value2'],
+ 'target_capture_kit': ['value1', 'value2'],
+ 'variant_classifications': ['Frame_Shift_Ins', 'Frame_Shift_Ins'],
+ 'read_length': [22, float('nan')],
+ 'number_of_genes': [5, 20],
+ 'gene_padding': [10, None],
+ 'calling_strategy': ['tumor_only', 'tumor_normal'],
+ 'specimen_tumor_cellularity': ['>10%', '>20%'],
+ 'alteration_types': ['snv;small_indels', 'intragenic_cna'],
+ 'preservation_technique': ['FFPE', 'FFPE;fresh_frozen'],
+ 'coverage': ['hotspot_regions;introns', 'introns']}
+ uniq_seq_df = pd.DataFrame({"seq": ["SAGE-1", "SAGE-3"]})
+ assay_info_df = pd.DataFrame(assay_info_dict)
+ test_dict = copy.deepcopy(GDC_DATA_DICT)
+ with patch.object(process_functions, "get_synid_database_mappingdf",
+ return_value="syn123"),\
+ patch.object(process_functions, "getDatabaseSynId",
+ return_value="syn1234"),\
+ patch.object(process_functions, "get_syntabledf",
+ return_value=uniq_seq_df),\
+ patch.object(process_functions, "get_gdc_data_dictionary",
+ return_value=test_dict) as patch_get_gdc:
+ error, warning = ASSAY_INFO._validate(assay_info_df, "syn9999")
+ assert error == ''
+ assert warning == ''
+ patch_get_gdc.assert_called()
+
+
+def test_underscore__validate():
+ """Valid input should have no errors or warnings"""
+ assay_info_dict = {
+ 'SEQ_ASSAY_ID': ['SAGE_1', 'SAGE-3'],
+ 'is_paired_end': [True, False],
+ 'library_strategy': ['value1', 'value2'],
+ 'library_selection': ['value1', 'value2'],
+ 'platform': ['value1', 'value2'],
+ 'instrument_model': ['value1', 'value2'],
+ 'target_capture_kit': ['value1', 'value2'],
+ 'variant_classifications': ['Frame_Shift_Ins', 'Frame_Shift_Ins'],
+ 'read_length': [22, float('nan')],
+ 'number_of_genes': [5, 20],
+ 'gene_padding': [10, None],
+ 'calling_strategy': ['tumor_only', 'tumor_normal'],
+ 'specimen_tumor_cellularity': ['>10%', '>20%'],
+ 'alteration_types': ['snv;small_indels', 'intragenic_cna'],
+ 'preservation_technique': ['FFPE', 'FFPE;fresh_frozen'],
+ 'coverage': ['hotspot_regions;introns', 'introns']}
+ uniq_seq_df = pd.DataFrame({"seq": ["SAGE-1", "SAGE-3"]})
+ assay_info_df = pd.DataFrame(assay_info_dict)
+ test_dict = copy.deepcopy(GDC_DATA_DICT)
+ with patch.object(process_functions, "get_synid_database_mappingdf",
+ return_value="syn123"),\
+ patch.object(process_functions, "getDatabaseSynId",
+ return_value="syn1234"),\
+ patch.object(process_functions, "get_syntabledf",
+ return_value=uniq_seq_df),\
+ patch.object(process_functions, "get_gdc_data_dictionary",
+ return_value=test_dict) as patch_get_gdc:
+ error, warning = ASSAY_INFO._validate(assay_info_df, "syn9999")
+ assert error == ''
+ assert warning == ''
+ patch_get_gdc.assert_called()
+
+
def test__missingcols__validate():
"""Test missing columns"""
assay_info_df = pd.DataFrame()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | 12.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest_v2",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"black"
],
"pre_install": [
"apt-get update",
"apt-get install -y bedtools"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest -vs tests/"
} | -e git+https://github.com/Sage-Bionetworks/Genie.git@0485b7e1599c4894714a92d5e12e99cc2b0cd02d#egg=aacrgenie
anyio==4.9.0
async-lru==2.0.5
asyncio-atexit==1.0.1
black==25.1.0
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
Deprecated==1.2.18
exceptiongroup==1.2.2
googleapis-common-protos==1.69.2
h11==0.14.0
httpcore==1.0.7
httplib2==0.22.0
httpx==0.27.2
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
mypy-extensions==1.0.0
natsort==8.4.0
ncls==0.0.68
nest-asyncio==1.6.0
numpy==2.0.2
opentelemetry-api==1.31.1
opentelemetry-exporter-otlp-proto-common==1.31.1
opentelemetry-exporter-otlp-proto-http==1.31.1
opentelemetry-instrumentation==0.52b1
opentelemetry-instrumentation-httpx==0.52b1
opentelemetry-instrumentation-requests==0.52b1
opentelemetry-instrumentation-threading==0.52b1
opentelemetry-instrumentation-urllib==0.52b1
opentelemetry-proto==1.31.1
opentelemetry-sdk==1.31.1
opentelemetry-semantic-conventions==0.52b1
opentelemetry-util-http==0.52b1
packaging==24.2
pandas==2.2.3
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
protobuf==5.29.4
psutil==5.9.8
pycryptodome==3.22.0
pyparsing==3.2.3
pyranges==0.1.4
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
six==1.17.0
sniffio==1.3.1
sorted-nearest==0.0.39
synapseclient==4.7.0
tabulate==0.9.0
tomli==2.2.1
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
urllib3==1.26.20
wrapt==1.17.2
zipp==3.21.0
| name: Genie
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aacrgenie==12.7.0
- anyio==4.9.0
- async-lru==2.0.5
- asyncio-atexit==1.0.1
- black==25.1.0
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- deprecated==1.2.18
- exceptiongroup==1.2.2
- googleapis-common-protos==1.69.2
- h11==0.14.0
- httpcore==1.0.7
- httplib2==0.22.0
- httpx==0.27.2
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- mypy-extensions==1.0.0
- natsort==8.4.0
- ncls==0.0.68
- nest-asyncio==1.6.0
- numpy==2.0.2
- opentelemetry-api==1.31.1
- opentelemetry-exporter-otlp-proto-common==1.31.1
- opentelemetry-exporter-otlp-proto-http==1.31.1
- opentelemetry-instrumentation==0.52b1
- opentelemetry-instrumentation-httpx==0.52b1
- opentelemetry-instrumentation-requests==0.52b1
- opentelemetry-instrumentation-threading==0.52b1
- opentelemetry-instrumentation-urllib==0.52b1
- opentelemetry-proto==1.31.1
- opentelemetry-sdk==1.31.1
- opentelemetry-semantic-conventions==0.52b1
- opentelemetry-util-http==0.52b1
- packaging==24.2
- pandas==2.2.3
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- protobuf==5.29.4
- psutil==5.9.8
- pycryptodome==3.22.0
- pyparsing==3.2.3
- pyranges==0.1.4
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- six==1.17.0
- sniffio==1.3.1
- sorted-nearest==0.0.39
- synapseclient==4.7.0
- tabulate==0.9.0
- tomli==2.2.1
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==1.26.20
- wrapt==1.17.2
- zipp==3.21.0
prefix: /opt/conda/envs/Genie
| [
"tests/test_assay.py::test_case__validate",
"tests/test_assay.py::test_underscore__validate"
] | [] | [
"tests/test_assay.py::test_filetype",
"tests/test_assay.py::test_invalidname__validatefilename",
"tests/test_assay.py::test_correct__validatefilename",
"tests/test_assay.py::test_validinput__validate",
"tests/test_assay.py::test__missingcols__validate",
"tests/test_assay.py::test_fillcols__process",
"tests/test_assay.py::test_default10__process",
"tests/test_assay.py::test_invalid__validate"
] | [] | MIT License | 12,145 | 709 | [
"genie/process_functions.py",
"genie_registry/assay.py"
] |
|
encode__starlette-1472 | a6d3d8f0179cb1c2ac283bb5db779f6075854a66 | 2022-02-03 11:41:58 | e7c1858146ed2ae5082345823cb99bd69f7c67e0 | diff --git a/starlette/websockets.py b/starlette/websockets.py
index da74060..03ed199 100644
--- a/starlette/websockets.py
+++ b/starlette/websockets.py
@@ -34,13 +34,21 @@ class WebSocket(HTTPConnection):
if self.client_state == WebSocketState.CONNECTING:
message = await self._receive()
message_type = message["type"]
- assert message_type == "websocket.connect"
+ if message_type != "websocket.connect":
+ raise RuntimeError(
+ 'Expected ASGI message "websocket.connect", '
+ f"but got {message_type!r}"
+ )
self.client_state = WebSocketState.CONNECTED
return message
elif self.client_state == WebSocketState.CONNECTED:
message = await self._receive()
message_type = message["type"]
- assert message_type in {"websocket.receive", "websocket.disconnect"}
+ if message_type not in {"websocket.receive", "websocket.disconnect"}:
+ raise RuntimeError(
+ 'Expected ASGI message "websocket.receive" or '
+ f'"websocket.disconnect", but got {message_type!r}'
+ )
if message_type == "websocket.disconnect":
self.client_state = WebSocketState.DISCONNECTED
return message
@@ -55,7 +63,11 @@ class WebSocket(HTTPConnection):
"""
if self.application_state == WebSocketState.CONNECTING:
message_type = message["type"]
- assert message_type in {"websocket.accept", "websocket.close"}
+ if message_type not in {"websocket.accept", "websocket.close"}:
+ raise RuntimeError(
+ 'Expected ASGI message "websocket.connect", '
+ f"but got {message_type!r}"
+ )
if message_type == "websocket.close":
self.application_state = WebSocketState.DISCONNECTED
else:
@@ -63,7 +75,11 @@ class WebSocket(HTTPConnection):
await self._send(message)
elif self.application_state == WebSocketState.CONNECTED:
message_type = message["type"]
- assert message_type in {"websocket.send", "websocket.close"}
+ if message_type not in {"websocket.send", "websocket.close"}:
+ raise RuntimeError(
+ 'Expected ASGI message "websocket.send" or "websocket.close", '
+ f"but got {message_type!r}"
+ )
if message_type == "websocket.close":
self.application_state = WebSocketState.DISCONNECTED
await self._send(message)
@@ -89,20 +105,30 @@ class WebSocket(HTTPConnection):
raise WebSocketDisconnect(message["code"])
async def receive_text(self) -> str:
- assert self.application_state == WebSocketState.CONNECTED
+ if self.application_state != WebSocketState.CONNECTED:
+ raise RuntimeError(
+ 'WebSocket is not connected. Need to call "accept" first.'
+ )
message = await self.receive()
self._raise_on_disconnect(message)
return message["text"]
async def receive_bytes(self) -> bytes:
- assert self.application_state == WebSocketState.CONNECTED
+ if self.application_state != WebSocketState.CONNECTED:
+ raise RuntimeError(
+ 'WebSocket is not connected. Need to call "accept" first.'
+ )
message = await self.receive()
self._raise_on_disconnect(message)
return message["bytes"]
async def receive_json(self, mode: str = "text") -> typing.Any:
- assert mode in ["text", "binary"]
- assert self.application_state == WebSocketState.CONNECTED
+ if mode not in {"text", "binary"}:
+ raise RuntimeError('The "mode" argument should be "text" or "binary".')
+ if self.application_state != WebSocketState.CONNECTED:
+ raise RuntimeError(
+ 'WebSocket is not connected. Need to call "accept" first.'
+ )
message = await self.receive()
self._raise_on_disconnect(message)
@@ -140,7 +166,8 @@ class WebSocket(HTTPConnection):
await self.send({"type": "websocket.send", "bytes": data})
async def send_json(self, data: typing.Any, mode: str = "text") -> None:
- assert mode in ["text", "binary"]
+ if mode not in {"text", "binary"}:
+ raise RuntimeError('The "mode" argument should be "text" or "binary".')
text = json.dumps(data)
if mode == "text":
await self.send({"type": "websocket.send", "text": text})
| Where assert statements are guarding against invalid ASGI messaging, use RuntimeError instead.
### Checklist
- [X] There are no similar issues or pull requests for this yet.
- [X] I discussed this idea on the [community chat](https://gitter.im/encode/community) and feedback is positive.
### Is your feature related to a problem? Please describe.
There are `assert` statements in the source code which raise a vague and hard to debug `AssertionError`. For example on [this line](https://github.com/encode/starlette/blob/f12c92a21500d484b3d48f965bb605c1bbe193bc/starlette/websockets.py#L58).
If some kind of exception (for example something along the lines of: `WebSocketMessageType`) were raised it would make debugging a lot clearer. I spent a lot more time than I should have just working out where exactly this `AssertionError` was coming from and what the root cause was.
### Describe the solution you would like.
This is by no means the right solution but at least it's an idea of the kind of thing that might help:
```python
class WebSocketMessageType(Exception):
pass
class WebSocket(HTTPConnection):
...
async def send(self, message: Message) -> None:
"""
Send ASGI websocket messages, ensuring valid state transitions.
"""
if self.application_state == WebSocketState.CONNECTING:
message_type = message["type"]
if message_type not in {"websocket.accept", "websocket.close"}:
raise WebSocketMessageType("expected message_type to be websocket.accept or websocket.close")
if message_type == "websocket.close":
self.application_state = WebSocketState.DISCONNECTED
else:
self.application_state = WebSocketState.CONNECTED
await self._send(message)
elif self.application_state == WebSocketState.CONNECTED:
message_type = message["type"]
if message_type not in {"websocket.send", "websocket.close"}:
raise WebSocketMessageType("expected message_type to be websocket.send or websocket.close")
if message_type == "websocket.close":
self.application_state = WebSocketState.DISCONNECTED
await self._send(message)
else:
raise RuntimeError('Cannot call "send" once a close message has been sent.')
```
### Describe alternatives you considered
_No response_
### Additional context
The error I was seeing:
```
ERROR root:a_file.py:31 {'message': 'Job processing failed', 'job': <Job coro=<<coroutine object a_class.a_method at 0x7f6d7a7c1ec0>>>, 'exception': AssertionError()}
NoneType: None
```
And this would be it with a `raise` statement: admittedly there is still no mention of `starlette` so a user would still have to diagnose that as the root cause.
```
ERROR root:a_file.py:31 {'message': 'Job processing failed', 'job': <Job coro=<<coroutine object a_class.a_method at 0x7fb99c2ed940>>>, 'exception': WebSocketMessageType('expected message_type to be websocket.accept or websocket.close')}
NoneType: None
```
Also, I have no idea where that `NoneType: None` is coming from or what that means. | encode/starlette | diff --git a/tests/test_websockets.py b/tests/test_websockets.py
index b11685c..e3a5276 100644
--- a/tests/test_websockets.py
+++ b/tests/test_websockets.py
@@ -2,7 +2,7 @@ import anyio
import pytest
from starlette import status
-from starlette.websockets import WebSocket, WebSocketDisconnect
+from starlette.websockets import WebSocket, WebSocketDisconnect, WebSocketState
def test_websocket_url(test_client_factory):
@@ -422,3 +422,135 @@ def test_websocket_close_reason(test_client_factory) -> None:
websocket.receive_text()
assert exc.value.code == status.WS_1001_GOING_AWAY
assert exc.value.reason == "Going Away"
+
+
+def test_send_json_invalid_mode(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.accept()
+ await websocket.send_json({}, mode="invalid")
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_receive_json_invalid_mode(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.accept()
+ await websocket.receive_json(mode="invalid")
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_receive_text_before_accept(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.receive_text()
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_receive_bytes_before_accept(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.receive_bytes()
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_receive_json_before_accept(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.receive_json()
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_send_before_accept(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.send({"type": "websocket.send"})
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_send_wrong_message_type(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.send({"type": "websocket.accept"})
+ await websocket.send({"type": "websocket.accept"})
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/"):
+ pass # pragma: nocover
+
+
+def test_receive_before_accept(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.accept()
+ websocket.client_state = WebSocketState.CONNECTING
+ await websocket.receive()
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/") as websocket:
+ websocket.send({"type": "websocket.send"})
+
+
+def test_receive_wrong_message_type(test_client_factory):
+ def app(scope):
+ async def asgi(receive, send):
+ websocket = WebSocket(scope, receive=receive, send=send)
+ await websocket.accept()
+ await websocket.receive()
+
+ return asgi
+
+ client = test_client_factory(app)
+ with pytest.raises(RuntimeError):
+ with client.websocket_connect("/") as websocket:
+ websocket.send({"type": "websocket.connect"})
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.18 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[full]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiosqlite==0.19.0
anyio==3.7.1
async-generator==1.10
attrs==24.2.0
autoflake==1.4
black==22.1.0
bleach==6.0.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==6.2
cryptography==44.0.2
databases==0.5.3
docutils==0.20.1
exceptiongroup==1.2.2
execnet==2.0.2
flake8==4.0.1
ghp-import==2.1.0
greenlet==3.1.1
idna==3.10
importlib-metadata==4.2.0
iniconfig==2.0.0
isort==5.10.1
itsdangerous==2.1.2
jaraco.classes==3.2.3
jeepney==0.9.0
Jinja2==3.1.6
keyring==23.9.3
Markdown==3.3.4
MarkupSafe==2.1.5
mccabe==0.6.1
mergedeep==1.3.4
mkautodoc==0.1.0
mkdocs==1.2.3
mkdocs-material==8.1.3
mkdocs-material-extensions==1.2
more-itertools==9.1.0
mypy==0.931
mypy-extensions==1.0.0
outcome==1.3.0.post0
packaging==24.0
pathspec==0.11.2
pkginfo==1.10.0
platformdirs==4.0.0
pluggy==1.2.0
py==1.11.0
pycodestyle==2.8.0
pycparser==2.21
pyflakes==2.4.0
Pygments==2.17.2
pymdown-extensions==10.2.1
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
python-multipart==0.0.8
PyYAML==6.0.1
pyyaml_env_tag==0.1
readme-renderer==37.3
requests==2.31.0
requests-toolbelt==1.0.0
rfc3986==2.0.0
SecretStorage==3.3.3
six==1.17.0
sniffio==1.3.1
sortedcontainers==2.4.0
SQLAlchemy==1.4.54
-e git+https://github.com/encode/starlette.git@a6d3d8f0179cb1c2ac283bb5db779f6075854a66#egg=starlette
toml==0.10.2
tomli==2.0.1
tqdm==4.67.1
trio==0.19.0
twine==3.7.1
typed-ast==1.5.5
types-contextvars==2.4.2
types-dataclasses==0.6.2
types-PyYAML==6.0.4
types-requests==2.26.3
typing_extensions==4.7.1
urllib3==2.0.7
watchdog==3.0.0
webencodings==0.5.1
zipp==3.15.0
| name: starlette
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiosqlite==0.19.0
- anyio==3.7.1
- async-generator==1.10
- attrs==24.2.0
- autoflake==1.4
- black==22.1.0
- bleach==6.0.0
- cffi==1.15.1
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==6.2
- cryptography==44.0.2
- databases==0.5.3
- docutils==0.20.1
- exceptiongroup==1.2.2
- execnet==2.0.2
- flake8==4.0.1
- ghp-import==2.1.0
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==4.2.0
- iniconfig==2.0.0
- isort==5.10.1
- itsdangerous==2.1.2
- jaraco-classes==3.2.3
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==23.9.3
- markdown==3.3.4
- markupsafe==2.1.5
- mccabe==0.6.1
- mergedeep==1.3.4
- mkautodoc==0.1.0
- mkdocs==1.2.3
- mkdocs-material==8.1.3
- mkdocs-material-extensions==1.2
- more-itertools==9.1.0
- mypy==0.931
- mypy-extensions==1.0.0
- outcome==1.3.0.post0
- packaging==24.0
- pathspec==0.11.2
- pkginfo==1.10.0
- platformdirs==4.0.0
- pluggy==1.2.0
- py==1.11.0
- pycodestyle==2.8.0
- pycparser==2.21
- pyflakes==2.4.0
- pygments==2.17.2
- pymdown-extensions==10.2.1
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- python-multipart==0.0.8
- pyyaml==6.0.1
- pyyaml-env-tag==0.1
- readme-renderer==37.3
- requests==2.31.0
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- secretstorage==3.3.3
- six==1.17.0
- sniffio==1.3.1
- sortedcontainers==2.4.0
- sqlalchemy==1.4.54
- toml==0.10.2
- tomli==2.0.1
- tqdm==4.67.1
- trio==0.19.0
- twine==3.7.1
- typed-ast==1.5.5
- types-contextvars==2.4.2
- types-dataclasses==0.6.2
- types-pyyaml==6.0.4
- types-requests==2.26.3
- typing-extensions==4.7.1
- urllib3==2.0.7
- watchdog==3.0.0
- webencodings==0.5.1
- wheel==0.37.1
- zipp==3.15.0
prefix: /opt/conda/envs/starlette
| [
"tests/test_websockets.py::test_send_json_invalid_mode[asyncio]",
"tests/test_websockets.py::test_receive_json_invalid_mode[asyncio]",
"tests/test_websockets.py::test_receive_text_before_accept[asyncio]",
"tests/test_websockets.py::test_receive_bytes_before_accept[asyncio]",
"tests/test_websockets.py::test_receive_json_before_accept[asyncio]",
"tests/test_websockets.py::test_send_before_accept[asyncio]",
"tests/test_websockets.py::test_send_wrong_message_type[asyncio]",
"tests/test_websockets.py::test_receive_before_accept[asyncio]",
"tests/test_websockets.py::test_receive_wrong_message_type[asyncio]"
] | [
"tests/test_websockets.py::test_websocket_url[trio]",
"tests/test_websockets.py::test_websocket_binary_json[trio]",
"tests/test_websockets.py::test_websocket_query_params[trio]",
"tests/test_websockets.py::test_websocket_headers[trio]",
"tests/test_websockets.py::test_websocket_port[trio]",
"tests/test_websockets.py::test_websocket_send_and_receive_text[trio]",
"tests/test_websockets.py::test_websocket_send_and_receive_bytes[trio]",
"tests/test_websockets.py::test_websocket_send_and_receive_json[trio]",
"tests/test_websockets.py::test_websocket_iter_text[trio]",
"tests/test_websockets.py::test_websocket_iter_bytes[trio]",
"tests/test_websockets.py::test_websocket_iter_json[trio]",
"tests/test_websockets.py::test_websocket_concurrency_pattern[trio]",
"tests/test_websockets.py::test_client_close[trio]",
"tests/test_websockets.py::test_application_close[trio]",
"tests/test_websockets.py::test_rejected_connection[trio]",
"tests/test_websockets.py::test_subprotocol[trio]",
"tests/test_websockets.py::test_additional_headers[trio]",
"tests/test_websockets.py::test_no_additional_headers[trio]",
"tests/test_websockets.py::test_websocket_exception[trio]",
"tests/test_websockets.py::test_duplicate_close[trio]",
"tests/test_websockets.py::test_duplicate_disconnect[trio]",
"tests/test_websockets.py::test_websocket_close_reason[trio]",
"tests/test_websockets.py::test_send_json_invalid_mode[trio]",
"tests/test_websockets.py::test_receive_json_invalid_mode[trio]",
"tests/test_websockets.py::test_receive_text_before_accept[trio]",
"tests/test_websockets.py::test_receive_bytes_before_accept[trio]",
"tests/test_websockets.py::test_receive_json_before_accept[trio]",
"tests/test_websockets.py::test_send_before_accept[trio]",
"tests/test_websockets.py::test_send_wrong_message_type[trio]",
"tests/test_websockets.py::test_receive_before_accept[trio]",
"tests/test_websockets.py::test_receive_wrong_message_type[trio]"
] | [
"tests/test_websockets.py::test_websocket_url[asyncio]",
"tests/test_websockets.py::test_websocket_binary_json[asyncio]",
"tests/test_websockets.py::test_websocket_query_params[asyncio]",
"tests/test_websockets.py::test_websocket_headers[asyncio]",
"tests/test_websockets.py::test_websocket_port[asyncio]",
"tests/test_websockets.py::test_websocket_send_and_receive_text[asyncio]",
"tests/test_websockets.py::test_websocket_send_and_receive_bytes[asyncio]",
"tests/test_websockets.py::test_websocket_send_and_receive_json[asyncio]",
"tests/test_websockets.py::test_websocket_iter_text[asyncio]",
"tests/test_websockets.py::test_websocket_iter_bytes[asyncio]",
"tests/test_websockets.py::test_websocket_iter_json[asyncio]",
"tests/test_websockets.py::test_websocket_concurrency_pattern[asyncio]",
"tests/test_websockets.py::test_client_close[asyncio]",
"tests/test_websockets.py::test_application_close[asyncio]",
"tests/test_websockets.py::test_rejected_connection[asyncio]",
"tests/test_websockets.py::test_subprotocol[asyncio]",
"tests/test_websockets.py::test_additional_headers[asyncio]",
"tests/test_websockets.py::test_no_additional_headers[asyncio]",
"tests/test_websockets.py::test_websocket_exception[asyncio]",
"tests/test_websockets.py::test_duplicate_close[asyncio]",
"tests/test_websockets.py::test_duplicate_disconnect[asyncio]",
"tests/test_websockets.py::test_websocket_scope_interface",
"tests/test_websockets.py::test_websocket_close_reason[asyncio]"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,149 | 992 | [
"starlette/websockets.py"
] |
|
onicagroup__runway-1249 | 7070937a9d02b4e4e92435e8f28962196627d0a9 | 2022-02-03 14:01:55 | 644d67ca6e74e07928fe89040b0a1b382fca34cb | codecov[bot]: # [Codecov](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup) Report
> Merging [#1249](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup) (56369b3) into [master](https://codecov.io/gh/onicagroup/runway/commit/7070937a9d02b4e4e92435e8f28962196627d0a9?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup) (7070937) will **decrease** coverage by `0.00%`.
> The diff coverage is `100.00%`.
[](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup)
```diff
@@ Coverage Diff @@
## master #1249 +/- ##
==========================================
- Coverage 89.67% 89.67% -0.01%
==========================================
Files 247 247
Lines 15231 15240 +9
==========================================
+ Hits 13659 13666 +7
- Misses 1572 1574 +2
```
| [Impacted Files](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup) | Coverage Δ | |
|---|---|---|
| [runway/cfngin/hooks/command.py](https://codecov.io/gh/onicagroup/runway/pull/1249/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup#diff-cnVud2F5L2Nmbmdpbi9ob29rcy9jb21tYW5kLnB5) | `98.11% <100.00%> (+7.20%)` | :arrow_up: |
| [runway/cfngin/hooks/aws\_lambda.py](https://codecov.io/gh/onicagroup/runway/pull/1249/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup#diff-cnVud2F5L2Nmbmdpbi9ob29rcy9hd3NfbGFtYmRhLnB5) | `84.10% <0.00%> (-1.66%)` | :arrow_down: |
| [runway/env\_mgr/kbenv.py](https://codecov.io/gh/onicagroup/runway/pull/1249/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup#diff-cnVud2F5L2Vudl9tZ3Iva2JlbnYucHk=) | `78.76% <0.00%> (-0.69%)` | :arrow_down: |
| [runway/env\_mgr/tfenv.py](https://codecov.io/gh/onicagroup/runway/pull/1249/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup#diff-cnVud2F5L2Vudl9tZ3IvdGZlbnYucHk=) | `96.46% <0.00%> (-0.51%)` | :arrow_down: |
| [runway/cfngin/exceptions.py](https://codecov.io/gh/onicagroup/runway/pull/1249/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup#diff-cnVud2F5L2Nmbmdpbi9leGNlcHRpb25zLnB5) | `97.15% <0.00%> (+0.94%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup). Last update [7070937...56369b3](https://codecov.io/gh/onicagroup/runway/pull/1249?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=onicagroup).
| diff --git a/runway/cfngin/hooks/command.py b/runway/cfngin/hooks/command.py
index 519294f1..f267e534 100644
--- a/runway/cfngin/hooks/command.py
+++ b/runway/cfngin/hooks/command.py
@@ -1,16 +1,51 @@
"""Command hook."""
import logging
import os
-from subprocess import PIPE, Popen
+import subprocess
from typing import Any, Dict, List, Optional, Union
from typing_extensions import TypedDict
+from ...utils import BaseModel
from ..exceptions import ImproperlyConfigured
LOGGER = logging.getLogger(__name__)
+class RunCommandHookArgs(BaseModel):
+ """Hook arguments for ``run_command``."""
+
+ capture: bool = False
+ """If enabled, capture the command's stdout and stderr, and return them in the hook result."""
+
+ command: Union[str, List[str]]
+ """Command(s) to run."""
+
+ env: Optional[Dict[str, str]] = None
+ """Dictionary of environment variable overrides for the command context.
+ Will be merged with the current environment.
+
+ """
+
+ ignore_status: bool = False
+ """Don't fail the hook if the command returns a non-zero status."""
+
+ interactive: bool = False
+ """If enabled, allow the command to interact with stdin.
+ Otherwise, stdin will be set to the null device.
+
+ """
+
+ quiet: bool = False
+ """Redirect the command's stdout and stderr to the null device, silencing all output.
+ Should not be enabled if ``capture`` is also enabled.
+
+ """
+
+ stdin: Optional[str] = None
+ """String to send to the stdin of the command. Implicitly disables ``interactive``."""
+
+
class RunCommandResponseTypeDef(TypedDict, total=False):
"""Response from run_command."""
@@ -19,36 +54,10 @@ class RunCommandResponseTypeDef(TypedDict, total=False):
stdout: str
-def run_command(
- *,
- command: Union[str, List[str]],
- capture: bool = False,
- interactive: bool = False,
- ignore_status: bool = False,
- quiet: bool = False,
- stdin: Optional[str] = None,
- env: Optional[Dict[str, str]] = None,
- **kwargs: Any,
-) -> RunCommandResponseTypeDef:
+def run_command(*__args: Any, **kwargs: Any) -> RunCommandResponseTypeDef:
"""Run a custom command as a hook.
- Args:
- command: Command(s) to run.
- capture: If enabled, capture the command's stdout and stderr,
- and return them in the hook result.
- interactive: If enabled, allow the command to interact with
- stdin. Otherwise, stdin will be set to the null device.
- ignore_status: Don't fail the hook if the command returns a
- non-zero status.
- quiet: Redirect the command's stdout and stderr to the null device,
- silencing all output. Should not be enabled if ``capture`` is also
- enabled.
- stdin: String to send to the stdin of the command. Implicitly disables
- ``interactive``.
- env: Dictionary of environment variable overrides for the command context.
- Will be merged with the current environment.
-
- Additional keyword arguments passed to the function will be forwarded to the
+ Arguments not parsed by the data model will be forwarded to the
``subprocess.Popen`` function. Interesting ones include: ``cwd`` and ``shell``.
Examples:
@@ -80,61 +89,67 @@ def run_command(
shell: true
"""
+ args = RunCommandHookArgs.parse_obj(kwargs)
+
+ # remove parsed args from kwargs
+ for field in RunCommandHookArgs.__fields__:
+ kwargs.pop(field, None)
+
# remove unneeded args from kwargs
kwargs.pop("context", None)
kwargs.pop("provider", None)
- if quiet and capture:
+ if args.quiet and args.capture:
raise ImproperlyConfigured(
__name__ + ".run_command",
ValueError("Cannot enable `quiet` and `capture` options simultaneously"),
)
with open(os.devnull, "wb") as devnull:
- if quiet:
+ if args.quiet:
out_err_type = devnull
- elif capture:
- out_err_type = PIPE
+ elif args.capture:
+ out_err_type = subprocess.PIPE
else:
out_err_type = None
- if interactive:
+ if args.interactive:
in_type = None
- elif stdin:
- in_type = PIPE
+ elif args.stdin:
+ in_type = subprocess.PIPE
else:
in_type = devnull
- if env:
+ if args.env:
full_env = os.environ.copy()
- full_env.update(env)
- env = full_env
+ full_env.update(args.env)
+ args.env = full_env
- LOGGER.info("running command: %s", command)
+ LOGGER.info("running command: %s", args.command)
- with Popen(
- command,
+ with subprocess.Popen(
+ args.command,
stdin=in_type,
stdout=out_err_type,
stderr=out_err_type,
- env=env,
+ env=args.env,
**kwargs,
) as proc:
try:
- out, err = proc.communicate(stdin)
+ out, err = proc.communicate(args.stdin)
status = proc.wait()
- if status == 0 or ignore_status:
+ if status == 0 or args.ignore_status:
return {"returncode": proc.returncode, "stdout": out, "stderr": err}
# Don't print the command line again if we already did earlier
- if LOGGER.isEnabledFor(logging.INFO):
+ if LOGGER.isEnabledFor(logging.INFO): # cov: ignore
LOGGER.warning("command failed with returncode %d", status)
else:
LOGGER.warning(
- "command failed with returncode %d: %s", status, command
+ "command failed with returncode %d: %s", status, args.command
)
return {}
- except Exception: # pylint: disable=broad-except
+ except Exception: # pylint: disable=broad-except # cov: ignore
return {}
| use pydantic to parse arguments for `runway.cfngin.hooks.command` | onicagroup/runway | diff --git a/tests/unit/cfngin/hooks/test_command.py b/tests/unit/cfngin/hooks/test_command.py
index 23f7459a..26fa9b3a 100644
--- a/tests/unit/cfngin/hooks/test_command.py
+++ b/tests/unit/cfngin/hooks/test_command.py
@@ -1,212 +1,98 @@
"""Tests for runway.cfngin.hooks.command."""
-from __future__ import annotations
-
# pylint: disable=no-self-use
# pyright: basic
-import os
-import unittest
-from subprocess import PIPE
-from typing import (
- TYPE_CHECKING,
- Any,
- ContextManager,
- List,
- Optional,
- Tuple,
- Type,
- Union,
-)
-
-import mock
+from __future__ import annotations
-from runway.cfngin.hooks.command import run_command
-from runway.config import CfnginConfig
-from runway.context import CfnginContext
+from typing import TYPE_CHECKING
+
+import pytest
-from ..factories import mock_provider
+from runway.cfngin.exceptions import ImproperlyConfigured
+from runway.cfngin.hooks.command import run_command
if TYPE_CHECKING:
- from types import TracebackType
-
-
-class MockProcess(ContextManager["MockProcess"]):
- """Mock process."""
-
- def __init__(
- self,
- returncode: int = 0,
- stdout: Optional[str] = "",
- stderr: Optional[str] = "",
- ) -> None:
- """Instantiate class."""
- self.returncode = returncode
- self.stdout = stdout
- self.stderr = stderr
- self.stdin = None
-
- def communicate(self, stdin: str) -> Tuple[Optional[str], Optional[str]]:
- """Communicate with process."""
- self.stdin = stdin
- return (self.stdout, self.stderr)
-
- def wait(self) -> int:
- """Wait for process."""
- return self.returncode
-
- def kill(self) -> None:
- """Kill process."""
- return
-
- def __enter__(self) -> MockProcess:
- """Enter the context manager."""
- return self
-
- def __exit__(
- self,
- exc_type: Optional[Type[BaseException]],
- exc_value: Optional[BaseException],
- traceback: Optional[TracebackType],
- ) -> None:
- """Exit the context manager."""
-
-
-class TestCommandHook(unittest.TestCase):
- """Tests for runway.cfngin.hooks.command."""
-
- def setUp(self) -> None:
- """Run before tests."""
- self.context = CfnginContext(
- config=CfnginConfig.parse_obj(
- {"namespace": "test", "cfngin_bucket": "test"}
- )
- )
- self.provider = mock_provider(region="us-east-1")
-
- self.mock_process = MockProcess()
- self.popen_mock = mock.patch(
- "runway.cfngin.hooks.command.Popen", return_value=self.mock_process
- ).start()
-
- def tearDown(self) -> None:
- """Run after tests."""
- self.popen_mock.stop()
-
- def run_hook(self, *, command: Union[str, List[str]], **kwargs: Any) -> Any:
- """Run hook."""
- real_kwargs = {
- "context": self.context,
- }
- real_kwargs.update(kwargs)
- return run_command(command=command, **real_kwargs)
-
- def test_command_ok(self) -> None:
- """Test command ok."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"])
-
- self.assertEqual(results, {"returncode": 0, "stdout": None, "stderr": None})
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=mock.ANY, stdout=None, stderr=None, env=None
- )
-
- def test_command_fail(self) -> None:
- """Test command fail."""
- self.mock_process.returncode = 1
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"])
-
- self.assertEqual(results, {})
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=mock.ANY, stdout=None, stderr=None, env=None
- )
-
- def test_command_ignore_status(self) -> None:
- """Test command ignore status."""
- self.mock_process.returncode = 1
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"], ignore_status=True)
-
- self.assertEqual(results, {"returncode": 1, "stdout": None, "stderr": None})
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=mock.ANY, stdout=None, stderr=None, env=None
- )
-
- def test_command_quiet(self) -> None:
- """Test command quiet."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"], quiet=True)
- self.assertEqual(results, {"returncode": 0, "stdout": None, "stderr": None})
-
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=mock.ANY, stdout=mock.ANY, stderr=mock.ANY, env=None
- )
-
- def test_command_interactive(self) -> None:
- """Test command interactive."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"], interactive=True)
- self.assertEqual(results, {"returncode": 0, "stdout": None, "stderr": None})
-
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=None, stdout=None, stderr=None, env=None
- )
-
- def test_command_input(self) -> None:
- """Test command input."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- results = self.run_hook(command=["foo"], stdin="hello world")
- self.assertEqual(results, {"returncode": 0, "stdout": None, "stderr": None})
-
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=PIPE, stdout=None, stderr=None, env=None
- )
- self.assertEqual(self.mock_process.stdin, "hello world")
-
- def test_command_capture(self) -> None:
- """Test command capture."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = "hello"
- self.mock_process.stderr = "world"
-
- results = self.run_hook(command=["foo"], capture=True)
- self.assertEqual(
- results, {"returncode": 0, "stdout": "hello", "stderr": "world"}
- )
-
- self.popen_mock.assert_called_once_with(
- ["foo"], stdin=mock.ANY, stdout=PIPE, stderr=PIPE, env=None
- )
-
- def test_command_env(self) -> None:
- """Test command env."""
- self.mock_process.returncode = 0
- self.mock_process.stdout = None
- self.mock_process.stderr = None
-
- with mock.patch.dict(os.environ, {"FOO": "bar"}, clear=True):
- results = self.run_hook(command=["foo"], env={"hello": "world"})
-
- self.assertEqual(results, {"returncode": 0, "stdout": None, "stderr": None})
- self.popen_mock.assert_called_once_with(
- ["foo"],
- stdin=mock.ANY,
- stdout=None,
- stderr=None,
- env={"hello": "world", "FOO": "bar"},
- )
+ from pytest_subprocess import FakeProcess
+
+
+def test_run_command(fake_process: FakeProcess) -> None:
+ """Test run_command."""
+ fake_process.register_subprocess(["foo"], returncode=0)
+ assert run_command(command=["foo"]) == {
+ "returncode": 0,
+ "stderr": None,
+ "stdout": None,
+ }
+
+
+def test_run_command_capture(fake_process: FakeProcess) -> None:
+ """Test run_command with ``capture``."""
+ fake_process.register_subprocess(
+ ["foo"], returncode=0, stderr="bar", stdout="foobar"
+ )
+ assert run_command(command=["foo"], capture=True) == {
+ "returncode": 0,
+ "stderr": b"bar", # for some reason, pytest-subprocess returns these as bytes
+ "stdout": b"foobar",
+ }
+
+
+def test_run_command_env(fake_process: FakeProcess) -> None:
+ """Test run_command with ``env``."""
+ fake_process.register_subprocess(["foo"], returncode=0)
+ assert run_command(command=["foo"], env={"TEST": "bar"}) == {
+ "returncode": 0,
+ "stderr": None,
+ "stdout": None,
+ }
+
+
+def test_run_command_fail(fake_process: FakeProcess) -> None:
+ """Test run_command non-zero exit code."""
+ fake_process.register_subprocess(["foo"], returncode=1)
+ assert not run_command(command=["foo"])
+
+
+def test_run_command_interactive(fake_process: FakeProcess) -> None:
+ """Test run_command with ``interactive``."""
+ fake_process.register_subprocess(["foo"], returncode=0)
+ assert run_command(command=["foo"], interactive=True) == {
+ "returncode": 0,
+ "stderr": None,
+ "stdout": None,
+ }
+
+
+def test_run_command_ignore_status(fake_process: FakeProcess) -> None:
+ """Test run_command with ``ignore_status``."""
+ fake_process.register_subprocess(["foo"], returncode=1)
+ assert run_command(command=["foo"], ignore_status=True) == {
+ "returncode": 1,
+ "stderr": None,
+ "stdout": None,
+ }
+
+
+def test_run_command_quiet(fake_process: FakeProcess) -> None:
+ """Test run_command with ``quiet``."""
+ fake_process.register_subprocess(["foo"], returncode=0, stderr="", stdout="")
+ assert run_command(command=["foo"], quiet=True) == {
+ "returncode": 0,
+ "stderr": None,
+ "stdout": None,
+ }
+
+
+def test_run_command_raise_improperly_configured() -> None:
+ """Test run_command raise ``ImproperlyConfigured``."""
+ with pytest.raises(ImproperlyConfigured):
+ run_command(command=["foo"], capture=True, quiet=True)
+
+
+def test_run_command_stdin(fake_process: FakeProcess) -> None:
+ """Test run_command with ``stdin``."""
+ fake_process.register_subprocess(["foo"], returncode=0)
+ assert run_command(command=["foo"], stdin="bar") == {
+ "returncode": 0,
+ "stderr": None,
+ "stdout": None,
+ }
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 2.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"black>=22.1",
"coverage[toml]>=6.3",
"doc8>=0.10",
"dunamai>=1.5",
"flake8>=4.0.1",
"flake8-bugbear>=21.9.2",
"flake8-comprehensions>=3.7.0",
"flake8-docstrings>=1.6",
"flake8-print>=4.0.0",
"flake8-use-fstring>=1.3",
"isort>=5.10",
"mock>=4.0",
"moto[ec2,ecs,iam,s3,ssm]>=3.0",
"mypy-boto3>=1.16",
"pep8-naming>=0.12.1",
"pipenv>=2022.1.8",
"pre-commit>=2.14",
"pydocstyle>=6.1.1",
"pyinstaller>=4.3",
"pylint>=2.12",
"pytest>=7.0",
"pytest-cov>=3.0",
"pytest-mock>=3.7",
"pytest-order>=1",
"pytest-subprocess>=1.4",
"pytest-sugar>=0.9",
"pytest-xdist>=2.5",
"semver>=2.13",
"testfixtures>=4.10",
"tomli-w>=1.0",
"boto3-stubs[acm,awslambda,cloudformation,cloudfront,cognito-idp,dynamodb,ec2,ecr,ecs,iam,kms,lambda,route53,s3,ssm,sts]>=1.16"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.8",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | altgraph==0.17.4
astroid==3.2.4
attrs==25.3.0
awacs==2.4.1
aws-sam-translator==1.95.0
black==24.8.0
boto3==1.37.23
boto3-stubs==1.37.23
botocore==1.37.23
botocore-stubs==1.37.23
bracex==2.5.post1
certifi==2025.1.31
cffi==1.17.1
cfgv==3.4.0
cfn-flip==1.3.0
cfn-lint==1.26.1
charset-normalizer==3.4.1
cli_exit_tools==1.2.7
click==8.1.8
coloredlogs==15.0
coverage==7.6.1
cryptography==44.0.2
dill==0.3.9
distlib==0.3.9
doc8==1.1.2
docker==7.1.0
docutils==0.20.1
dunamai==1.23.1
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.16.1
flake8==7.1.2
flake8-bugbear==24.12.12
flake8-comprehensions==3.15.0
flake8-docstrings==1.7.0
flake8-print==5.0.0
flake8-use-fstring==1.4
formic2==1.0.3
gitdb==4.0.12
GitPython==3.1.44
humanfriendly==10.0
identify==2.6.1
idna==3.10
igittigitt==2.1.5
importlib_metadata==8.5.0
importlib_resources==6.4.5
iniconfig==2.1.0
isort==5.13.2
Jinja2==3.1.6
jmespath==1.0.1
jsonpatch==1.33
jsonpointer==3.0.0
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
lark-parser==0.10.1
lib-detect-testenv==2.0.8
MarkupSafe==2.1.5
mccabe==0.7.0
mock==5.2.0
moto==5.0.28
mpmath==1.3.0
mypy-boto3==1.37.0
mypy-boto3-acm==1.37.0
mypy-boto3-cloudformation==1.37.22
mypy-boto3-cloudfront==1.37.9
mypy-boto3-cognito-idp==1.37.13.post1
mypy-boto3-dynamodb==1.37.12
mypy-boto3-ec2==1.37.16
mypy-boto3-ecr==1.37.11
mypy-boto3-ecs==1.37.23
mypy-boto3-iam==1.37.22
mypy-boto3-kms==1.37.0
mypy-boto3-lambda==1.37.16
mypy-boto3-route53==1.37.15
mypy-boto3-s3==1.37.0
mypy-boto3-ssm==1.37.19
mypy-boto3-sts==1.37.0
mypy-extensions==1.0.0
networkx==3.1
nodeenv==1.9.1
packaging==24.2
pathspec==0.12.1
pbr==6.1.1
pep8-naming==0.14.1
pipenv==2024.4.1
pkgutil_resolve_name==1.3.10
platformdirs==4.3.6
pluggy==1.5.0
pre-commit==3.5.0
py-partiql-parser==0.6.1
pycodestyle==2.12.1
pycparser==2.22
pydantic==1.10.21
pydocstyle==6.3.0
pyflakes==3.2.0
Pygments==2.19.1
pyhcl==0.4.5
pyinstaller==6.12.0
pyinstaller-hooks-contrib==2025.2
pylint==3.2.7
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-cov==5.0.0
pytest-mock==3.14.0
pytest-order==1.3.0
pytest-subprocess==1.5.3
pytest-sugar==1.0.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-hcl2==2.0.3
PyYAML==6.0.2
referencing==0.35.1
regex==2024.11.6
requests==2.32.3
responses==0.25.7
restructuredtext_lint==1.4.0
rpds-py==0.20.1
-e git+https://github.com/onicagroup/runway.git@7070937a9d02b4e4e92435e8f28962196627d0a9#egg=runway
s3transfer==0.11.4
semver==3.0.4
Send2Trash==1.8.3
six==1.17.0
smmap==5.0.2
snowballstemmer==2.2.0
stevedore==5.3.0
sympy==1.13.3
termcolor==2.4.0
testfixtures==8.3.0
tomli==2.2.1
tomli_w==1.0.0
tomlkit==0.13.2
troposphere==3.2.2
types-awscrt==0.24.2
types-s3transfer==0.11.4
typing_extensions==4.13.0
urllib3==1.26.20
virtualenv==20.29.3
wcmatch==10.0
Werkzeug==3.0.6
xmltodict==0.14.2
yamllint==1.35.1
zipp==3.20.2
| name: runway
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- altgraph==0.17.4
- astroid==3.2.4
- attrs==25.3.0
- awacs==2.4.1
- aws-sam-translator==1.95.0
- black==24.8.0
- boto3==1.37.23
- boto3-stubs==1.37.23
- botocore==1.37.23
- botocore-stubs==1.37.23
- bracex==2.5.post1
- certifi==2025.1.31
- cffi==1.17.1
- cfgv==3.4.0
- cfn-flip==1.3.0
- cfn-lint==1.26.1
- charset-normalizer==3.4.1
- cli-exit-tools==1.2.7
- click==8.1.8
- coloredlogs==15.0
- coverage==7.6.1
- cryptography==44.0.2
- dill==0.3.9
- distlib==0.3.9
- doc8==1.1.2
- docker==7.1.0
- docutils==0.20.1
- dunamai==1.23.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.16.1
- flake8==7.1.2
- flake8-bugbear==24.12.12
- flake8-comprehensions==3.15.0
- flake8-docstrings==1.7.0
- flake8-print==5.0.0
- flake8-use-fstring==1.4
- formic2==1.0.3
- gitdb==4.0.12
- gitpython==3.1.44
- humanfriendly==10.0
- identify==2.6.1
- idna==3.10
- igittigitt==2.1.5
- importlib-metadata==8.5.0
- importlib-resources==6.4.5
- iniconfig==2.1.0
- isort==5.13.2
- jinja2==3.1.6
- jmespath==1.0.1
- jsonpatch==1.33
- jsonpointer==3.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2023.12.1
- lark-parser==0.10.1
- lib-detect-testenv==2.0.8
- markupsafe==2.1.5
- mccabe==0.7.0
- mock==5.2.0
- moto==5.0.28
- mpmath==1.3.0
- mypy-boto3==1.37.0
- mypy-boto3-acm==1.37.0
- mypy-boto3-cloudformation==1.37.22
- mypy-boto3-cloudfront==1.37.9
- mypy-boto3-cognito-idp==1.37.13.post1
- mypy-boto3-dynamodb==1.37.12
- mypy-boto3-ec2==1.37.16
- mypy-boto3-ecr==1.37.11
- mypy-boto3-ecs==1.37.23
- mypy-boto3-iam==1.37.22
- mypy-boto3-kms==1.37.0
- mypy-boto3-lambda==1.37.16
- mypy-boto3-route53==1.37.15
- mypy-boto3-s3==1.37.0
- mypy-boto3-ssm==1.37.19
- mypy-boto3-sts==1.37.0
- mypy-extensions==1.0.0
- networkx==3.1
- nodeenv==1.9.1
- packaging==24.2
- pathspec==0.12.1
- pbr==6.1.1
- pep8-naming==0.14.1
- pipenv==2024.4.1
- pkgutil-resolve-name==1.3.10
- platformdirs==4.3.6
- pluggy==1.5.0
- pre-commit==3.5.0
- py-partiql-parser==0.6.1
- pycodestyle==2.12.1
- pycparser==2.22
- pydantic==1.10.21
- pydocstyle==6.3.0
- pyflakes==3.2.0
- pygments==2.19.1
- pyhcl==0.4.5
- pyinstaller==6.12.0
- pyinstaller-hooks-contrib==2025.2
- pylint==3.2.7
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-cov==5.0.0
- pytest-mock==3.14.0
- pytest-order==1.3.0
- pytest-subprocess==1.5.3
- pytest-sugar==1.0.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-hcl2==2.0.3
- pyyaml==6.0.2
- referencing==0.35.1
- regex==2024.11.6
- requests==2.32.3
- responses==0.25.7
- restructuredtext-lint==1.4.0
- rpds-py==0.20.1
- runway==2.0.0.dev0
- s3transfer==0.11.4
- semver==3.0.4
- send2trash==1.8.3
- six==1.17.0
- smmap==5.0.2
- snowballstemmer==2.2.0
- stevedore==5.3.0
- sympy==1.13.3
- termcolor==2.4.0
- testfixtures==8.3.0
- tomli==2.2.1
- tomli-w==1.0.0
- tomlkit==0.13.2
- troposphere==3.2.2
- types-awscrt==0.24.2
- types-s3transfer==0.11.4
- typing-extensions==4.13.0
- urllib3==1.26.20
- virtualenv==20.29.3
- wcmatch==10.0
- werkzeug==3.0.6
- xmltodict==0.14.2
- yamllint==1.35.1
- zipp==3.20.2
prefix: /opt/conda/envs/runway
| [
"tests/unit/cfngin/hooks/test_command.py::test_run_command",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_capture",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_env",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_fail",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_interactive",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_ignore_status",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_quiet",
"tests/unit/cfngin/hooks/test_command.py::test_run_command_stdin"
] | [] | [
"tests/unit/cfngin/hooks/test_command.py::test_run_command_raise_improperly_configured"
] | [] | Apache License 2.0 | 12,151 | 1,444 | [
"runway/cfngin/hooks/command.py"
] |
joke2k__faker-1609 | fc127fa65b7d16817b1fcea6f4186ef417ed4fcc | 2022-02-08 09:52:57 | fc127fa65b7d16817b1fcea6f4186ef417ed4fcc | Kalbra: I added a test case, hope that's the right way to do it | diff --git a/faker/providers/address/de_CH/__init__.py b/faker/providers/address/de_CH/__init__.py
index d0604d9d..3a096b65 100644
--- a/faker/providers/address/de_CH/__init__.py
+++ b/faker/providers/address/de_CH/__init__.py
@@ -4,7 +4,7 @@ from .. import Provider as AddressProvider
class Provider(AddressProvider):
- city_formats = ("{{canton_name}}",)
+ city_formats = ("{{city_name}}",)
building_number_formats = ("%", "%#", "%#", "%#", "%##")
street_suffixes = ["strasse"]
street_name_formats = ("{{last_name}}{{street_suffix}}",)
@@ -22,6 +22,171 @@ class Provider(AddressProvider):
"9###",
)
+ cities = (
+ "Aarau",
+ "Adliswil",
+ "Aesch",
+ "Affoltern",
+ "Aigle",
+ "Allschwil",
+ "Altstätten",
+ "Amriswil",
+ "Arbon",
+ "Arth",
+ "Baar",
+ "Baden",
+ "Basel",
+ "Bassersdorf",
+ "Bellinzona",
+ "Belp",
+ "Bern",
+ "Bernex",
+ "Biel/Bienne",
+ "Binningen",
+ "Birsfelden",
+ "Brig-Glis",
+ "Brugg",
+ "Buchs",
+ "Bülach",
+ "Bulle",
+ "Burgdorf",
+ "Carouge",
+ "Cham",
+ "Chêne-Bougeries",
+ "Chur",
+ "Crans-Montana",
+ "Davos",
+ "Delsberg",
+ "Dietikon",
+ "Dübendorf",
+ "Ebikon",
+ "Ecublens",
+ "Einsiedeln",
+ "Emmen",
+ "Flawil",
+ "Frauenfeld",
+ "Freiburg",
+ "Freienbach",
+ "Genf",
+ "Gland",
+ "Glarus",
+ "Glarus",
+ "Gossau",
+ "Gossau",
+ "Grenchen",
+ "Herisau",
+ "Hinwil",
+ "Horgen",
+ "Horw",
+ "Illnau-Effretikon",
+ "Ittigen",
+ "Kloten",
+ "Köniz",
+ "Kreuzlingen",
+ "Kriens",
+ "Küsnacht",
+ "Küssnacht",
+ "La Chaux-de-Fonds",
+ "La Tour-de-Peilz",
+ "Lancy",
+ "Langenthal",
+ "Lausanne",
+ "Le Grand-Saconnex",
+ "Lenzburg",
+ "Liestal",
+ "Locarno",
+ "Lugano",
+ "Lutry",
+ "Luzern",
+ "Lyss",
+ "Männedorf",
+ "Martigny",
+ "Maur",
+ "Meilen",
+ "Mendrisio",
+ "Meyrin",
+ "Möhlin",
+ "Monthey",
+ "Montreux",
+ "Morges",
+ "Münchenbuchsee",
+ "Münchenstein",
+ "Münsingen",
+ "Muri",
+ "Muttenz",
+ "Naters",
+ "Neuenburg",
+ "Neuhausen",
+ "Nyon",
+ "Oberwil",
+ "Oftringen",
+ "Olten",
+ "Onex",
+ "Opfikon",
+ "Ostermundigen",
+ "Payerne",
+ "Pfäffikon",
+ "Plan-les-Ouates",
+ "Pratteln",
+ "Prilly",
+ "Pully",
+ "Rapperswil-Jona",
+ "Regensdorf",
+ "Reinach",
+ "Renens",
+ "Rheinfelden",
+ "Richterswil",
+ "Riehen",
+ "Risch",
+ "Romanshorn",
+ "Rüti",
+ "Sarnen",
+ "Schaffhausen",
+ "Schlieren",
+ "Schwyz",
+ "Siders",
+ "Sitten",
+ "Solothurn",
+ "Spiez",
+ "Spreitenbach",
+ "St. Gallen",
+ "Stäfa",
+ "Steffisburg",
+ "Steinhausen",
+ "Suhr",
+ "Sursee",
+ "Thalwil",
+ "Thônex",
+ "Thun",
+ "Urdorf",
+ "Uster",
+ "Uzwil",
+ "Val-de-Ruz",
+ "Val-de-Travers",
+ "Vernier",
+ "Versoix",
+ "Vevey",
+ "Veyrier",
+ "Villars-sur-Glâne",
+ "Volketswil",
+ "Wädenswil",
+ "Wald",
+ "Wallisellen",
+ "Weinfelden",
+ "Wettingen",
+ "Wetzikon",
+ "Wil",
+ "Winterthur",
+ "Wohlen",
+ "Worb",
+ "Yverdon-les-Bains",
+ "Zofingen",
+ "Zollikofen",
+ "Zollikon",
+ "Zug",
+ "Zürich",
+ )
+
cantons = (
("AG", "Aargau"),
("AI", "Appenzell Innerrhoden"),
@@ -58,6 +223,13 @@ class Provider(AddressProvider):
"""
return self.random_element(self.cantons)
+ def city_name(self) -> str:
+ """
+ Randomly returns a swiss city.
+ :example 'Zug'
+ """
+ return self.random_element(self.cities)
+
def administrative_unit(self) -> str:
"""
Randomly returns a Swiss canton name.
| fake.address does not work properly for 'de-CH'
* Faker version: 9.8.3 build: pyhd8ed1ab_0 from: conda-forge
* OS: Windows 10 / anaconda
fake = Faker('de_CH')
fake.address()
does not work properly.
Instead of producing a city/village, it produces a Canton
### Steps to reproduce
from faker import Faker
fake = Faker('de_CH')
print(fake.address())
### Expected behavior
Output should be:
street housenumber
zip_code village
### Actual behavior
Output is
street housenumber
zip_code canton | joke2k/faker | diff --git a/tests/providers/test_address.py b/tests/providers/test_address.py
index ce005c11..918c9c36 100644
--- a/tests/providers/test_address.py
+++ b/tests/providers/test_address.py
@@ -1776,6 +1776,12 @@ class TestDeCh:
assert isinstance(canton, tuple)
assert canton in DeChAddressProvider.cantons
+ def test_city(self, faker, num_samples):
+ for _ in range(num_samples):
+ city = faker.city_name()
+ assert isinstance(city, str)
+ assert city in DeChAddressProvider.cities
+
class TestRoRo:
"""Test ro_RO address provider methods"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 12.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"mypy",
"isort",
"black"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | black==25.1.0
click==8.1.8
exceptiongroup==1.2.2
-e git+https://github.com/joke2k/faker.git@fc127fa65b7d16817b1fcea6f4186ef417ed4fcc#egg=Faker
flake8==7.2.0
iniconfig==2.1.0
isort==6.0.1
mccabe==0.7.0
mypy==1.15.0
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.2
pytest==8.3.5
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
| name: faker
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- black==25.1.0
- click==8.1.8
- exceptiongroup==1.2.2
- flake8==7.2.0
- iniconfig==2.1.0
- isort==6.0.1
- mccabe==0.7.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/faker
| [
"tests/providers/test_address.py::TestDeCh::test_city"
] | [] | [
"tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes",
"tests/providers/test_address.py::TestBaseProvider::test_alpha_2_country_codes_as_default",
"tests/providers/test_address.py::TestBaseProvider::test_alpha_3_country_codes",
"tests/providers/test_address.py::TestBaseProvider::test_bad_country_code_representation",
"tests/providers/test_address.py::TestBaseProvider::test_administrative_unit_all_locales",
"tests/providers/test_address.py::TestBaseProvider::test_country_code_all_locales",
"tests/providers/test_address.py::TestBaseProvider::test_current_country_errors",
"tests/providers/test_address.py::TestAzAz::test_street_suffix_long",
"tests/providers/test_address.py::TestAzAz::test_city_name",
"tests/providers/test_address.py::TestAzAz::test_street_name",
"tests/providers/test_address.py::TestAzAz::test_settlement_name",
"tests/providers/test_address.py::TestAzAz::test_village_name",
"tests/providers/test_address.py::TestAzAz::test_postcode",
"tests/providers/test_address.py::TestCsCz::test_street_suffix_short",
"tests/providers/test_address.py::TestCsCz::test_street_suffix_long",
"tests/providers/test_address.py::TestCsCz::test_city_name",
"tests/providers/test_address.py::TestCsCz::test_street_name",
"tests/providers/test_address.py::TestCsCz::test_state",
"tests/providers/test_address.py::TestCsCz::test_postcode",
"tests/providers/test_address.py::TestCsCz::test_city_with_postcode",
"tests/providers/test_address.py::TestDaDk::test_street_prefix",
"tests/providers/test_address.py::TestDaDk::test_city_name",
"tests/providers/test_address.py::TestDaDk::test_state",
"tests/providers/test_address.py::TestDaDk::test_postcode",
"tests/providers/test_address.py::TestDeAt::test_city",
"tests/providers/test_address.py::TestDeAt::test_state",
"tests/providers/test_address.py::TestDeAt::test_street_suffix_short",
"tests/providers/test_address.py::TestDeAt::test_street_suffix_long",
"tests/providers/test_address.py::TestDeAt::test_country",
"tests/providers/test_address.py::TestDeAt::test_postcode",
"tests/providers/test_address.py::TestDeAt::test_city_with_postcode",
"tests/providers/test_address.py::TestDeDe::test_city",
"tests/providers/test_address.py::TestDeDe::test_state",
"tests/providers/test_address.py::TestDeDe::test_street_suffix_short",
"tests/providers/test_address.py::TestDeDe::test_street_suffix_long",
"tests/providers/test_address.py::TestDeDe::test_country",
"tests/providers/test_address.py::TestDeDe::test_postcode",
"tests/providers/test_address.py::TestDeDe::test_city_with_postcode",
"tests/providers/test_address.py::TestElGr::test_line_address",
"tests/providers/test_address.py::TestElGr::test_street_prefix_short",
"tests/providers/test_address.py::TestElGr::test_street_prefix_long",
"tests/providers/test_address.py::TestElGr::test_street",
"tests/providers/test_address.py::TestElGr::test_city",
"tests/providers/test_address.py::TestElGr::test_region",
"tests/providers/test_address.py::TestEnAu::test_postcode",
"tests/providers/test_address.py::TestEnAu::test_state",
"tests/providers/test_address.py::TestEnAu::test_city_prefix",
"tests/providers/test_address.py::TestEnAu::test_state_abbr",
"tests/providers/test_address.py::TestEnNz::test_state",
"tests/providers/test_address.py::TestEnNz::test_postcode",
"tests/providers/test_address.py::TestEnCa::test_postcode",
"tests/providers/test_address.py::TestEnCa::test_postcode_in_province",
"tests/providers/test_address.py::TestEnCa::test_postalcode",
"tests/providers/test_address.py::TestEnCa::test_postal_code_letter",
"tests/providers/test_address.py::TestEnCa::test_province",
"tests/providers/test_address.py::TestEnCa::test_province_abbr",
"tests/providers/test_address.py::TestEnCa::test_city_prefix",
"tests/providers/test_address.py::TestEnCa::test_secondary_address",
"tests/providers/test_address.py::TestEnGb::test_county",
"tests/providers/test_address.py::TestEnIe::test_postcode",
"tests/providers/test_address.py::TestEnIe::test_county",
"tests/providers/test_address.py::TestEnUS::test_city_prefix",
"tests/providers/test_address.py::TestEnUS::test_state",
"tests/providers/test_address.py::TestEnUS::test_state_abbr",
"tests/providers/test_address.py::TestEnUS::test_state_abbr_no_territories",
"tests/providers/test_address.py::TestEnUS::test_postcode",
"tests/providers/test_address.py::TestEnUS::test_postcode_in_state",
"tests/providers/test_address.py::TestEnUS::test_zipcode",
"tests/providers/test_address.py::TestEnUS::test_zipcode_in_state",
"tests/providers/test_address.py::TestEnUS::test_zipcode_plus4",
"tests/providers/test_address.py::TestEnUS::test_military_ship",
"tests/providers/test_address.py::TestEnUS::test_military_state",
"tests/providers/test_address.py::TestEnUS::test_military_apo",
"tests/providers/test_address.py::TestEnUS::test_military_dpo",
"tests/providers/test_address.py::TestEnUS::test_postalcode",
"tests/providers/test_address.py::TestEnUS::test_postalcode_in_state",
"tests/providers/test_address.py::TestEsCo::test_department_code",
"tests/providers/test_address.py::TestEsCo::test_department",
"tests/providers/test_address.py::TestEsCo::test_municipality_code",
"tests/providers/test_address.py::TestEsCo::test_municipality",
"tests/providers/test_address.py::TestEsCo::test_street_prefix",
"tests/providers/test_address.py::TestEsCo::test_street_suffix",
"tests/providers/test_address.py::TestEsCo::test_street_name",
"tests/providers/test_address.py::TestEsCo::test_building_number",
"tests/providers/test_address.py::TestEsCo::test_secondary_address",
"tests/providers/test_address.py::TestEsCo::test_street_address",
"tests/providers/test_address.py::TestEsCo::test_postcode",
"tests/providers/test_address.py::TestEsCo::test_address",
"tests/providers/test_address.py::TestEsEs::test_state_name",
"tests/providers/test_address.py::TestEsEs::test_street_prefix",
"tests/providers/test_address.py::TestEsEs::test_secondary_address",
"tests/providers/test_address.py::TestEsEs::test_regions",
"tests/providers/test_address.py::TestEsEs::test_autonomous_community",
"tests/providers/test_address.py::TestEsMx::test_city_prefix",
"tests/providers/test_address.py::TestEsMx::test_city_suffix",
"tests/providers/test_address.py::TestEsMx::test_city_adjective",
"tests/providers/test_address.py::TestEsMx::test_street_prefix",
"tests/providers/test_address.py::TestEsMx::test_secondary_address",
"tests/providers/test_address.py::TestEsMx::test_state",
"tests/providers/test_address.py::TestEsMx::test_state_abbr",
"tests/providers/test_address.py::TestFaIr::test_city_prefix",
"tests/providers/test_address.py::TestFaIr::test_secondary_address",
"tests/providers/test_address.py::TestFaIr::test_state",
"tests/providers/test_address.py::TestFrFr::test_street_prefix",
"tests/providers/test_address.py::TestFrFr::test_city_prefix",
"tests/providers/test_address.py::TestFrFr::test_region",
"tests/providers/test_address.py::TestFrFr::test_department",
"tests/providers/test_address.py::TestFrFr::test_department_name",
"tests/providers/test_address.py::TestFrFr::test_department_number",
"tests/providers/test_address.py::TestHeIl::test_city_name",
"tests/providers/test_address.py::TestHeIl::test_street_title",
"tests/providers/test_address.py::TestHiIn::test_city_name",
"tests/providers/test_address.py::TestHiIn::test_state",
"tests/providers/test_address.py::TestTaIn::test_city_name",
"tests/providers/test_address.py::TestTaIn::test_state",
"tests/providers/test_address.py::TestFiFi::test_city",
"tests/providers/test_address.py::TestFiFi::test_street_suffix",
"tests/providers/test_address.py::TestFiFi::test_state",
"tests/providers/test_address.py::TestHrHr::test_city_name",
"tests/providers/test_address.py::TestHrHr::test_street_name",
"tests/providers/test_address.py::TestHrHr::test_state",
"tests/providers/test_address.py::TestHuHu::test_postcode",
"tests/providers/test_address.py::TestHuHu::test_street_address",
"tests/providers/test_address.py::TestHuHu::test_street_address_with_county",
"tests/providers/test_address.py::TestHuHu::test_address",
"tests/providers/test_address.py::TestHyAm::test_address",
"tests/providers/test_address.py::TestHyAm::test_building_number",
"tests/providers/test_address.py::TestHyAm::test_city",
"tests/providers/test_address.py::TestHyAm::test_city_prefix",
"tests/providers/test_address.py::TestHyAm::test_country",
"tests/providers/test_address.py::TestHyAm::test_postcode",
"tests/providers/test_address.py::TestHyAm::test_postcode_in_state",
"tests/providers/test_address.py::TestHyAm::test_secondary_address",
"tests/providers/test_address.py::TestHyAm::test_state",
"tests/providers/test_address.py::TestHyAm::test_state_abbr",
"tests/providers/test_address.py::TestHyAm::test_street",
"tests/providers/test_address.py::TestHyAm::test_street_address",
"tests/providers/test_address.py::TestHyAm::test_street_name",
"tests/providers/test_address.py::TestHyAm::test_street_prefix",
"tests/providers/test_address.py::TestHyAm::test_street_suffix",
"tests/providers/test_address.py::TestHyAm::test_village",
"tests/providers/test_address.py::TestHyAm::test_village_prefix",
"tests/providers/test_address.py::TestItIt::test_city",
"tests/providers/test_address.py::TestItIt::test_postcode_city_province",
"tests/providers/test_address.py::TestJaJp::test_chome",
"tests/providers/test_address.py::TestJaJp::test_ban",
"tests/providers/test_address.py::TestJaJp::test_gou",
"tests/providers/test_address.py::TestJaJp::test_town",
"tests/providers/test_address.py::TestJaJp::test_prefecture",
"tests/providers/test_address.py::TestJaJp::test_city",
"tests/providers/test_address.py::TestJaJp::test_country",
"tests/providers/test_address.py::TestJaJp::test_building_name",
"tests/providers/test_address.py::TestJaJp::test_address",
"tests/providers/test_address.py::TestJaJp::test_postcode",
"tests/providers/test_address.py::TestJaJp::test_zipcode",
"tests/providers/test_address.py::TestKoKr::test_old_postal_code",
"tests/providers/test_address.py::TestKoKr::test_postal_code",
"tests/providers/test_address.py::TestKoKr::test_postcode",
"tests/providers/test_address.py::TestNeNp::test_province",
"tests/providers/test_address.py::TestNeNp::test_district",
"tests/providers/test_address.py::TestNeNp::test_city",
"tests/providers/test_address.py::TestNeNp::test_country",
"tests/providers/test_address.py::TestNoNo::test_postcode",
"tests/providers/test_address.py::TestNoNo::test_city_suffix",
"tests/providers/test_address.py::TestNoNo::test_street_suffix",
"tests/providers/test_address.py::TestNoNo::test_address",
"tests/providers/test_address.py::TestZhTw::test_postcode",
"tests/providers/test_address.py::TestZhTw::test_city_name",
"tests/providers/test_address.py::TestZhTw::test_city_suffix",
"tests/providers/test_address.py::TestZhTw::test_city",
"tests/providers/test_address.py::TestZhTw::test_country",
"tests/providers/test_address.py::TestZhTw::test_street_name",
"tests/providers/test_address.py::TestZhTw::test_address",
"tests/providers/test_address.py::TestZhCn::test_postcode",
"tests/providers/test_address.py::TestZhCn::test_city_name",
"tests/providers/test_address.py::TestZhCn::test_city_suffix",
"tests/providers/test_address.py::TestZhCn::test_city",
"tests/providers/test_address.py::TestZhCn::test_province",
"tests/providers/test_address.py::TestZhCn::test_district",
"tests/providers/test_address.py::TestZhCn::test_country",
"tests/providers/test_address.py::TestZhCn::test_street_name",
"tests/providers/test_address.py::TestZhCn::test_address",
"tests/providers/test_address.py::TestPtBr::test_country",
"tests/providers/test_address.py::TestPtBr::test_bairro",
"tests/providers/test_address.py::TestPtBr::test_neighborhood",
"tests/providers/test_address.py::TestPtBr::test_estado",
"tests/providers/test_address.py::TestPtBr::test_estado_nome",
"tests/providers/test_address.py::TestPtBr::test_estado_sigla",
"tests/providers/test_address.py::TestPtBr::test_address",
"tests/providers/test_address.py::TestPtBr::test_raw_postcode",
"tests/providers/test_address.py::TestPtBr::test_formatted_postcode",
"tests/providers/test_address.py::TestPtPt::test_distrito",
"tests/providers/test_address.py::TestPtPt::test_concelho",
"tests/providers/test_address.py::TestPtPt::test_freguesia",
"tests/providers/test_address.py::TestPtPt::test_place_name",
"tests/providers/test_address.py::TestEnPh::test_metro_manila_postcode",
"tests/providers/test_address.py::TestEnPh::test_luzon_province_postcode",
"tests/providers/test_address.py::TestEnPh::test_visayas_province_postcode",
"tests/providers/test_address.py::TestEnPh::test_mindanao_province_postcode",
"tests/providers/test_address.py::TestEnPh::test_postcode",
"tests/providers/test_address.py::TestEnPh::test_building_number",
"tests/providers/test_address.py::TestEnPh::test_floor_unit_number",
"tests/providers/test_address.py::TestEnPh::test_ordinal_floor_number",
"tests/providers/test_address.py::TestEnPh::test_address",
"tests/providers/test_address.py::TestFilPh::test_metro_manila_postcode",
"tests/providers/test_address.py::TestFilPh::test_luzon_province_postcode",
"tests/providers/test_address.py::TestFilPh::test_visayas_province_postcode",
"tests/providers/test_address.py::TestFilPh::test_mindanao_province_postcode",
"tests/providers/test_address.py::TestFilPh::test_postcode",
"tests/providers/test_address.py::TestFilPh::test_building_number",
"tests/providers/test_address.py::TestFilPh::test_floor_unit_number",
"tests/providers/test_address.py::TestFilPh::test_ordinal_floor_number",
"tests/providers/test_address.py::TestFilPh::test_address",
"tests/providers/test_address.py::TestTlPh::test_metro_manila_postcode",
"tests/providers/test_address.py::TestTlPh::test_luzon_province_postcode",
"tests/providers/test_address.py::TestTlPh::test_visayas_province_postcode",
"tests/providers/test_address.py::TestTlPh::test_mindanao_province_postcode",
"tests/providers/test_address.py::TestTlPh::test_postcode",
"tests/providers/test_address.py::TestTlPh::test_building_number",
"tests/providers/test_address.py::TestTlPh::test_floor_unit_number",
"tests/providers/test_address.py::TestTlPh::test_ordinal_floor_number",
"tests/providers/test_address.py::TestTlPh::test_address",
"tests/providers/test_address.py::TestRuRu::test_city_name",
"tests/providers/test_address.py::TestRuRu::test_country",
"tests/providers/test_address.py::TestRuRu::test_region",
"tests/providers/test_address.py::TestRuRu::test_postcode",
"tests/providers/test_address.py::TestRuRu::test_city_prefix",
"tests/providers/test_address.py::TestRuRu::test_street_suffix",
"tests/providers/test_address.py::TestRuRu::test_street_title",
"tests/providers/test_address.py::TestRuRu::test_street_name",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[feminine_suffix_and_noflex_title]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[feminine_suffix_and_flex_title]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[non_feminine_suffix_and_noflex_title]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[masc_suffix_and_irregular_masc_title]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[masc_suffix_and_ck_street_stem]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[masc_suffix_and_uk_street_stem]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[masc_suffix_and_other_stem]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[neu_suffx_and_iregular_neu_street_title]",
"tests/providers/test_address.py::TestRuRu::test_street_name_lexical[neu_suffix_and_regular_street_title]",
"tests/providers/test_address.py::TestThTh::test_country",
"tests/providers/test_address.py::TestThTh::test_city_name",
"tests/providers/test_address.py::TestThTh::test_province",
"tests/providers/test_address.py::TestThTh::test_amphoe",
"tests/providers/test_address.py::TestThTh::test_tambon",
"tests/providers/test_address.py::TestThTh::test_postcode",
"tests/providers/test_address.py::TestEnIn::test_city_name",
"tests/providers/test_address.py::TestEnIn::test_state",
"tests/providers/test_address.py::TestSkSk::test_street_suffix_short",
"tests/providers/test_address.py::TestSkSk::test_street_suffix_long",
"tests/providers/test_address.py::TestSkSk::test_city_name",
"tests/providers/test_address.py::TestSkSk::test_street_name",
"tests/providers/test_address.py::TestSkSk::test_state",
"tests/providers/test_address.py::TestSkSk::test_postcode",
"tests/providers/test_address.py::TestSkSk::test_city_with_postcode",
"tests/providers/test_address.py::TestDeCh::test_canton_name",
"tests/providers/test_address.py::TestDeCh::test_canton_code",
"tests/providers/test_address.py::TestDeCh::test_canton",
"tests/providers/test_address.py::TestRoRo::test_address",
"tests/providers/test_address.py::TestRoRo::test_street_address",
"tests/providers/test_address.py::TestRoRo::test_street_name",
"tests/providers/test_address.py::TestRoRo::test_street_prefix",
"tests/providers/test_address.py::TestRoRo::test_building_number",
"tests/providers/test_address.py::TestRoRo::test_secondary_address",
"tests/providers/test_address.py::TestRoRo::test_city",
"tests/providers/test_address.py::TestRoRo::test_city_name",
"tests/providers/test_address.py::TestRoRo::test_state",
"tests/providers/test_address.py::TestRoRo::test_state_abbr",
"tests/providers/test_address.py::TestRoRo::test_postcode",
"tests/providers/test_address.py::TestRoRo::test_city_with_postcode"
] | [] | MIT License | 12,179 | 1,468 | [
"faker/providers/address/de_CH/__init__.py"
] |
fjosw__pyerrors-61 | 7568275d5d6f1ae48b938c6cc604dadf43e8395f | 2022-02-09 14:20:05 | 815970e780a22f63c9a6092d718a7ff3e5b37c8b | diff --git a/pyerrors/input/json.py b/pyerrors/input/json.py
index 6b85487..cdf203f 100644
--- a/pyerrors/input/json.py
+++ b/pyerrors/input/json.py
@@ -171,6 +171,9 @@ def create_json_string(ol, description='', indent=1):
names.append(key)
idl.append(value)
my_obs = Obs(samples, names, idl)
+ my_obs._covobs = obs._covobs
+ for name in obs._covobs:
+ my_obs.names.append(name)
my_obs.reweighted = obs.reweighted
my_obs.is_merged = obs.is_merged
return my_obs
| Bug in JSON export together with Correlators and Covobs
The export of a derivative of a correlator that includes a Covobs fails. Minimal non-working example:
```python
c = pe.Corr([pe.pseudo_Obs(i, .1, 'test') for i in range(10)])
c *= pe.cov_Obs(1., .1, '#ren')
c = c.deriv()
pe.input.json.dump_to_json(c, 'test')
```
gives
[...]
```
~/phd/git/pyerrors_github/pyerrors/misc.py in _assert_equal_properties(ol, otype)
84 raise Exception("All Obs in list have to have the same property 'reweighted'.")
85 if not ol[0].e_content == o.e_content:
---> 86 raise Exception("All Obs in list have to be defined on the same set of configs.")
87 if not ol[0].idl == o.idl:
88 raise Exception("All Obs in list have to be defined on the same set of configurations.")
Exception: All Obs in list have to be defined on the same set of configs.
```
This is based on several problems. In
https://github.com/fjosw/pyerrors/blob/f51503555be8349d441c6e982f34d416f2defd74/pyerrors/input/json.py#L165-L176
a line is missing:
```python
my_obs._covobs = obs._covobs
```
But this does not solve the problem, since the covobs are not known to e_content
https://github.com/fjosw/pyerrors/blob/f51503555be8349d441c6e982f34d416f2defd74/pyerrors/obs.py#L168-L175
because self.names is not updated when the covobs are set manually. There are several possible fixes:
- Add
```python
for name in obs._covobs:
my_obs.names.append(name)
```
to _nan_Obs_like
- Write a method to set covobs manually that takes care of this in all cases where we do this. However, the user should not do this in principle.
- Construct ```e_content``` and ```mc_names``` differently by having a property that automatically generates names from self.names and self._covobs.keys()
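To make the third option concrete, here is a minimal, self-contained sketch — a hypothetical `ObsSketch` class for illustration only, not pyerrors' actual `Obs` implementation:

```python
class ObsSketch:
    """Hypothetical stand-in showing the property-based idea only."""

    def __init__(self, mc_names, covobs):
        self._mc_names = list(mc_names)  # Monte-Carlo ensemble names
        self._covobs = dict(covobs)      # name -> covariance observable

    @property
    def names(self):
        # Derived on the fly, so manually attached covobs can never be missed.
        return self._mc_names + list(self._covobs.keys())
```

For the record, the merged patch above takes the first route instead: it copies `obs._covobs` over and appends those names to `my_obs.names` directly.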
What do you think? | fjosw/pyerrors | diff --git a/tests/json_io_test.py b/tests/json_io_test.py
index f5546d9..04ccfd9 100644
--- a/tests/json_io_test.py
+++ b/tests/json_io_test.py
@@ -242,3 +242,12 @@ def test_json_dict_io():
jsonio.dump_dict_to_json(od, fname, description=desc)
os.remove(fname + '.json.gz')
+
+
+def test_renorm_deriv_of_corr(tmp_path):
+ c = pe.Corr([pe.pseudo_Obs(i, .1, 'test') for i in range(10)])
+ c *= pe.cov_Obs(1., .1, '#ren')
+ c = c.deriv()
+ pe.input.json.dump_to_json(c, (tmp_path / 'test').as_posix())
+ recover = pe.input.json.load_json((tmp_path / 'test').as_posix())
+ assert np.all([o == 0 for o in (c - recover)[1:-1]])
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | autograd @ git+https://github.com/HIPS/autograd.git@f2f4c66b2afd6c5a7b0f830b4bd05b99b19bcaa7
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
h5py==3.13.0
iminuit==2.30.1
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
numdifftools==0.9.41
numpy==2.0.2
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
-e git+https://github.com/fjosw/pyerrors.git@7568275d5d6f1ae48b938c6cc604dadf43e8395f#egg=pyerrors
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
zipp==3.21.0
| name: pyerrors
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- autograd==1.7.0
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- h5py==3.13.0
- iminuit==2.30.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numdifftools==0.9.41
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- zipp==3.21.0
prefix: /opt/conda/envs/pyerrors
| [
"tests/json_io_test.py::test_renorm_deriv_of_corr"
] | [] | [
"tests/json_io_test.py::test_jsonio",
"tests/json_io_test.py::test_json_string_reconstruction",
"tests/json_io_test.py::test_json_corr_io",
"tests/json_io_test.py::test_json_corr_2d_io",
"tests/json_io_test.py::test_json_dict_io"
] | [] | MIT License | 12,192 | 161 | [
"pyerrors/input/json.py"
] |
|
chanzuckerberg__miniwdl-549 | d50d97a9e263373d8d46610b2fdcc99b16059e49 | 2022-02-10 11:26:40 | d50d97a9e263373d8d46610b2fdcc99b16059e49 | coveralls: ## Pull Request Test Coverage Report for [Build 1823474294](https://coveralls.io/builds/46433010)
* **1** of **1** **(100.0%)** changed or added relevant line in **1** file are covered.
* **3** unchanged lines in **3** files lost coverage.
* Overall coverage decreased (**-0.03%**) to **94.98%**
---
| Files with Coverage Reduction | New Missed Lines | % |
| :-----|--------------|--: |
| [WDL/runtime/download.py](https://coveralls.io/builds/46433010/source?filename=WDL%2Fruntime%2Fdownload.py#L116) | 1 | 88.52% |
| [WDL/runtime/task.py](https://coveralls.io/builds/46433010/source?filename=WDL%2Fruntime%2Ftask.py#L71) | 1 | 94.44% |
| [WDL/runtime/workflow.py](https://coveralls.io/builds/46433010/source?filename=WDL%2Fruntime%2Fworkflow.py#L1013) | 1 | 97.0% |
| Totals | [](https://coveralls.io/builds/46433010) |
| :-- | --: |
| Change from base [Build 1815967130](https://coveralls.io/builds/46360100): | -0.03% |
| Covered Lines: | 6830 |
| Relevant Lines: | 7191 |
---
##### 💛 - [Coveralls](https://coveralls.io)
mlin: @kinow Thanks for tracking this down; it was certainly a dark corner of the codebase, which I had to re-read a couple times to remember how and why it's doing that! | diff --git a/WDL/_parser.py b/WDL/_parser.py
index 1705457..e9dda6d 100644
--- a/WDL/_parser.py
+++ b/WDL/_parser.py
@@ -16,19 +16,26 @@ _lark_lock = threading.Lock()
def parse(grammar: str, txt: str, start: str) -> Tuple[lark.Tree, List[lark.Token]]:
with _lark_lock:
- if (grammar, start) not in _lark_cache:
- _lark_cache[(grammar, start)] = lark.Lark(
- grammar,
- start=start,
- parser="lalr",
- maybe_placeholders=False,
- propagate_positions=True,
- lexer_callbacks={"COMMENT": _lark_comments_buffer.append},
+ assert not _lark_comments_buffer
+ try:
+ if (grammar, start) not in _lark_cache:
+ _lark_cache[(grammar, start)] = lark.Lark(
+ grammar,
+ start=start,
+ parser="lalr",
+ maybe_placeholders=False,
+ propagate_positions=True,
+ lexer_callbacks={"COMMENT": _lark_comments_buffer.append},
+ )
+ tree = _lark_cache[(grammar, start)].parse(
+ txt + ("\n" if not txt.endswith("\n") else "")
)
- tree = _lark_cache[(grammar, start)].parse(txt + ("\n" if not txt.endswith("\n") else ""))
- comments = _lark_comments_buffer.copy()
- _lark_comments_buffer.clear()
- return (tree, comments)
+ comments = _lark_comments_buffer.copy()
+ return (tree, comments)
+ finally:
+ # Wipe temp state (success or fail). It receives the side-effects of lark's
+ # lexer_callbacks, which we have to bind before memoizing the parser object.
+ _lark_comments_buffer.clear()
def to_int(x):
| Lark parser's comments cache is not cleared when a syntax error occurs
This appears to cause an assertion error when loading multiple WDL workflows. The state ([comments cache](https://github.com/chanzuckerberg/miniwdl/blob/d50d97a9e263373d8d46610b2fdcc99b16059e49/WDL/_parser.py#L13)) is kept at the module level, so it is only cleared when the module is unloaded, when the application stops, or when the parser [executes successfully](d50d97a9e263373d8d46610b2fdcc99b16059e49).
When you parse an invalid workflow that raises a `SyntaxError`, for instance, the cache will preserve the comments. Then, when you parse a valid workflow next, there will be an assertion error [when the `Document` object is created](https://github.com/chanzuckerberg/miniwdl/blob/d50d97a9e263373d8d46610b2fdcc99b16059e49/WDL/Tree.py#L1410).
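A minimal reproduction sketch of that sequence (it mirrors the regression test added below; `WDL.parse_document` and `WDL.Error.SyntaxError` are miniwdl's public API):

```python
import WDL  # miniwdl

bad_doc = """
version development
# this comment ends up in the lexer's comment buffer
workflow a {
"""  # missing closing brace -> SyntaxError

good_doc = """
version development
workflow a {}
"""

try:
    WDL.parse_document(bad_doc)
except WDL.Error.SyntaxError:
    pass  # expected

# Before the fix, the comments buffered while parsing bad_doc leaked into this
# parse and tripped an assertion; with the fix it succeeds.
WDL.parse_document(good_doc)
```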
This caused an error in one of the pull requests for the `wdl-cwl-translator`, as the test for an invalid file was executed before the other tests: https://github.com/common-workflow-lab/wdl-cwl-translator/pull/168
Thanks!
Bruno | chanzuckerberg/miniwdl | diff --git a/tests/test_1doc.py b/tests/test_1doc.py
index cc8cc42..817323b 100644
--- a/tests/test_1doc.py
+++ b/tests/test_1doc.py
@@ -1,5 +1,4 @@
-import unittest, inspect, tempfile, os, pickle
-from typing import Optional
+import unittest, tempfile, os, pickle
from .context import WDL
class TestTasks(unittest.TestCase):
@@ -1552,6 +1551,26 @@ task count_lines {
"""
WDL.parse_document(doc).typecheck()
+ def test_issue548_comments_buffer(self):
+ # bug where the comments cache was not emptied after a SyntaxError
+ bad_doc = r"""
+ version development
+
+ # comment 1
+ # comment 2
+ workflow a {
+ """
+ good_doc = r"""
+ version development
+
+ workflow a {}
+ """
+ # Previous to the fix, the comments in the bad doc were preserved, causing an
+ # assertion error when parsing the good doc.
+ with self.assertRaises(WDL.Error.SyntaxError):
+ WDL.parse_document(bad_doc)
+ WDL.parse_document(good_doc).typecheck()
+
class TestCycleDetection(unittest.TestCase):
def test_task(self):
doc = r"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements.dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
anyio==4.9.0
appdirs==1.4.4
argcomplete==1.12.3
astroid==3.3.9
babel==2.17.0
backcall==0.2.0
backports.tarfile==1.2.0
black==21.7b0
boto3==1.37.23
botocore==1.37.23
bullet==2.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
click-log==0.4.0
colorama==0.4.6
coloredlogs==15.0.1
commonmark==0.9.1
coverage==7.8.0
cryptography==44.0.2
decorator==5.2.1
dill==0.3.9
docker==7.1.0
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
flake8==7.2.0
graphviz==0.20.3
greenlet==3.1.1
h11==0.14.0
humanfriendly==10.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
ipython==6.5.0
isort==6.0.1
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jedi==0.19.2
jeepney==0.9.0
Jinja2==3.1.6
jmespath==1.0.1
keyring==25.6.0
lark==1.2.2
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
-e git+https://github.com/chanzuckerberg/miniwdl.git@d50d97a9e263373d8d46610b2fdcc99b16059e49#egg=miniwdl
more-itertools==10.6.0
munch==4.0.0
mypy-extensions==1.0.0
nh3==0.2.21
packaging==24.2
parso==0.8.4
pathspec==0.12.1
pbr==6.1.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==4.3.7
pluggy==1.5.0
prompt-toolkit==1.0.18
psutil==7.0.0
ptyprocess==0.7.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.2
Pygments==2.19.1
pygtail==0.14.0
pylint==3.3.6
pyre-check==0.0.27
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-coveralls==2.9.3
python-dateutil==2.9.0.post0
python-json-logger==2.0.7
pywatchman==2.0.0
PyYAML==6.0.2
readme_renderer==44.0
recommonmark==0.7.1
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
s3transfer==0.11.4
SecretStorage==3.3.3
simplegeneric==0.8.1
six==1.17.0
sniffio==1.3.1
snowballstemmer==2.2.0
Sphinx==7.3.0
sphinx-argparse==0.4.0
sphinx-autobuild==2024.10.3
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==2.0.40
starlette==0.46.1
testfixtures==8.3.0
tomli==1.2.3
tomlkit==0.13.2
traitlets==5.14.3
twine==6.1.0
typing_extensions==4.13.0
ujson==1.35
urllib3==1.26.20
uvicorn==0.34.0
watchfiles==1.0.4
wcwidth==0.2.13
websockets==15.0.1
xdg==6.0.0
xxhash==1.3.0
zipp==3.21.0
| name: miniwdl
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- anyio==4.9.0
- appdirs==1.4.4
- argcomplete==1.12.3
- astroid==3.3.9
- babel==2.17.0
- backcall==0.2.0
- backports-tarfile==1.2.0
- black==21.7b0
- boto3==1.37.23
- botocore==1.37.23
- bullet==2.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- click-log==0.4.0
- colorama==0.4.6
- coloredlogs==15.0.1
- commonmark==0.9.1
- coverage==7.8.0
- cryptography==44.0.2
- decorator==5.2.1
- dill==0.3.9
- docker==7.1.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- flake8==7.2.0
- graphviz==0.20.3
- greenlet==3.1.1
- h11==0.14.0
- humanfriendly==10.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipython==6.5.0
- isort==6.0.1
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jedi==0.19.2
- jeepney==0.9.0
- jinja2==3.1.6
- jmespath==1.0.1
- keyring==25.6.0
- lark==1.2.2
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- munch==4.0.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- packaging==24.2
- parso==0.8.4
- pathspec==0.12.1
- pbr==6.1.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==1.0.18
- psutil==7.0.0
- ptyprocess==0.7.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.2
- pygments==2.19.1
- pygtail==0.14.0
- pylint==3.3.6
- pyre-check==0.0.27
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-coveralls==2.9.3
- python-dateutil==2.9.0.post0
- python-json-logger==2.0.7
- pywatchman==2.0.0
- pyyaml==6.0.2
- readme-renderer==44.0
- recommonmark==0.7.1
- regex==2024.11.6
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- s3transfer==0.11.4
- secretstorage==3.3.3
- simplegeneric==0.8.1
- six==1.17.0
- sniffio==1.3.1
- snowballstemmer==2.2.0
- sphinx==7.3.0
- sphinx-argparse==0.4.0
- sphinx-autobuild==2024.10.3
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==2.0.40
- starlette==0.46.1
- testfixtures==8.3.0
- tomli==1.2.3
- tomlkit==0.13.2
- traitlets==5.14.3
- twine==6.1.0
- typing-extensions==4.13.0
- ujson==1.35
- urllib3==1.26.20
- uvicorn==0.34.0
- watchfiles==1.0.4
- wcwidth==0.2.13
- websockets==15.0.1
- xdg==6.0.0
- xxhash==1.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/miniwdl
| [
"tests/test_1doc.py::TestDoc::test_issue548_comments_buffer"
] | [
"tests/test_1doc.py::TestStruct::test_import"
] | [
"tests/test_1doc.py::TestTasks::test_compare_md5sums",
"tests/test_1doc.py::TestTasks::test_errors",
"tests/test_1doc.py::TestTasks::test_meta",
"tests/test_1doc.py::TestTasks::test_placeholders",
"tests/test_1doc.py::TestTasks::test_wc",
"tests/test_1doc.py::TestTypes::test_invalid",
"tests/test_1doc.py::TestTypes::test_map_io",
"tests/test_1doc.py::TestTypes::test_parser",
"tests/test_1doc.py::TestTypes::test_unify",
"tests/test_1doc.py::TestDoc::test_bam_chrom_counter",
"tests/test_1doc.py::TestDoc::test_count_foo",
"tests/test_1doc.py::TestDoc::test_draft2_workflow_outputs",
"tests/test_1doc.py::TestDoc::test_errors",
"tests/test_1doc.py::TestDoc::test_issue135_workflow_available_inputs",
"tests/test_1doc.py::TestDoc::test_issue173_workflow_section_order",
"tests/test_1doc.py::TestDoc::test_issue233_version_development",
"tests/test_1doc.py::TestDoc::test_multi_errors",
"tests/test_1doc.py::TestDoc::test_nested_scatter",
"tests/test_1doc.py::TestDoc::test_scatter_conditional",
"tests/test_1doc.py::TestDoc::test_task_forward_reference",
"tests/test_1doc.py::TestDoc::test_workflow_inputs",
"tests/test_1doc.py::TestCycleDetection::test_task",
"tests/test_1doc.py::TestCycleDetection::test_workflow",
"tests/test_1doc.py::TestStruct::test_decl",
"tests/test_1doc.py::TestStruct::test_keywords",
"tests/test_1doc.py::TestStruct::test_object_literal",
"tests/test_1doc.py::TestStruct::test_parser",
"tests/test_1doc.py::TestNoneLiteral::test_none_expr",
"tests/test_1doc.py::TestNoneLiteral::test_none_type_errors"
] | [] | MIT License | 12,196 | 446 | [
"WDL/_parser.py"
] |
ishepard__pydriller-206 | 036978c2e1f2d849395a23f2dba62d67b0c17ba7 | 2022-02-11 05:56:15 | 036978c2e1f2d849395a23f2dba62d67b0c17ba7 | diff --git a/pydriller/git.py b/pydriller/git.py
index 3beaf5a..4359222 100644
--- a/pydriller/git.py
+++ b/pydriller/git.py
@@ -297,19 +297,23 @@ class Git:
line.startswith('"""') or \
line.startswith("*")
- def get_commits_modified_file(self, filepath: str) -> List[str]:
+ def get_commits_modified_file(self, filepath: str, include_deleted_files=False) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
+ :param bool include_deleted_files: if True, include commits that modifies a deleted file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
- commits = self.repo.git.log("--follow", "--format=%H", path).split('\n')
+ if include_deleted_files:
+ commits = self.repo.git.log("--follow", "--format=%H", "--", path).split('\n')
+ else:
+ commits = self.repo.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug(f"Could not find information of file {path}")
diff --git a/pydriller/repository.py b/pydriller/repository.py
index 9433985..d6079d4 100644
--- a/pydriller/repository.py
+++ b/pydriller/repository.py
@@ -56,6 +56,7 @@ class Repository:
only_commits: List[str] = None,
only_releases: bool = False,
filepath: str = None,
+ include_deleted_files: bool = False,
histogram_diff: bool = False,
skip_whitespaces: bool = False,
clone_repo_to: str = None,
@@ -97,6 +98,7 @@ class Repository:
:param bool skip_whitespaces: add the "-w" option when asking for the diff
:param bool clone_repo_to: if the repo under analysis is remote, clone the repo to the specified directory
:param str filepath: only commits that modified this file will be analyzed
+ :param bool include_deleted_files: include commits modifying a deleted file (useful when analyzing a deleted `filepath`)
:param str order: order of commits. It can be one of: 'date-order',
'author-date-order', 'topo-order', or 'reverse'. Default is reverse.
"""
@@ -130,6 +132,7 @@ class Repository:
"only_releases": only_releases,
"skip_whitespaces": skip_whitespaces,
"filepath": filepath,
+ "include_deleted_files": include_deleted_files,
"filepath_commits": None,
"tagged_commits": None,
"histogram": histogram_diff,
@@ -215,7 +218,11 @@ class Repository:
# git rev-list since it doesn't have the option --follow, necessary to follow
# the renames. Hence, we manually call git log instead
if self._conf.get('filepath') is not None:
- self._conf.set_value('filepath_commits', git.get_commits_modified_file(self._conf.get('filepath')))
+ self._conf.set_value(
+ 'filepath_commits',
+ git.get_commits_modified_file(self._conf.get('filepath'),
+ self._conf.get('include_deleted_files'))
+ )
# Gets only the commits that are tagged
if self._conf.get('only_releases'):
| Add an option to retrieve commits modifying a deleted file
Hi :) I'm extracting the commit history of a certain configuration file on thousands of public repos hosted on Github. Some projects have this file deleted at some point, making it impossible to do so.
It would be nice to add an argument to the `Repository` class, which inserts `--` into the `git log` call:
```python
for commit in Repository(repo
filepath=file_to_look_for,
full_log=True).traverse_commits():
```
This feature is actually quite easy to implement ([reference here](https://feeding.cloud.geek.nz/posts/querying-deleted-content-in-git/)); [my fork](https://github.com/12f23eddde/pydriller/commit/9a44e7f5526c4dec4f43ed8d3cabf34cfa1254b4) made just a little change to get it working:
```python
def get_commits_modified_file(self, filepath: str, full_log=False) -> List[str]:
if full_log:
commits = self.repo.git.log("--follow", "--format=%H", "--", path).split('\n')
else:
commits = self.repo.git.log("--follow", "--format=%H", path).split('\n')
```
I would like to open a pull request if this change sounds reasonable.
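With the patch above applied, the intended usage looks roughly like this (a sketch reusing the same repository and deleted file as the regression test below):

```python
from pydriller import Repository

# Follow the full history of a file that was deleted at some point.
for commit in Repository(
    "https://github.com/ishepard/pydriller",
    filepath=".bettercodehub.yml",
    include_deleted_files=True,  # flag added by the patch
).traverse_commits():
    print(commit.hash)
```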
| ishepard/pydriller | diff --git a/tests/test_repository_mining.py b/tests/test_repository_mining.py
index 585148f..0e9b5aa 100644
--- a/tests/test_repository_mining.py
+++ b/tests/test_repository_mining.py
@@ -281,3 +281,12 @@ def test_deletion_remotes():
for path in paths:
assert os.path.exists(path) is False
+
+
+def test_deleted_files():
+ deleted_commits = list(
+ Repository('https://github.com/ishepard/pydriller',
+ filepath='.bettercodehub.yml',
+ include_deleted_files=True).traverse_commits()
+ )
+ assert len(deleted_commits) > 0
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"psutil"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
gitdb==4.0.12
GitPython==3.1.44
iniconfig==2.1.0
lizard==1.17.23
packaging==24.2
pathspec==0.12.1
pluggy==1.5.0
psutil==7.0.0
-e git+https://github.com/ishepard/pydriller.git@036978c2e1f2d849395a23f2dba62d67b0c17ba7#egg=PyDriller
Pygments==2.19.1
pytest==8.3.5
pytz==2025.2
smmap==5.0.2
tomli==2.2.1
types-pytz==2025.2.0.20250326
| name: pydriller
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- gitdb==4.0.12
- gitpython==3.1.44
- iniconfig==2.1.0
- lizard==1.17.23
- packaging==24.2
- pathspec==0.12.1
- pluggy==1.5.0
- psutil==7.0.0
- pygments==2.19.1
- pytest==8.3.5
- pytz==2025.2
- smmap==5.0.2
- tomli==2.2.1
- types-pytz==2025.2.0.20250326
prefix: /opt/conda/envs/pydriller
| [
"tests/test_repository_mining.py::test_deleted_files"
] | [
"tests/test_repository_mining.py::test_ignore_add_whitespaces",
"tests/test_repository_mining.py::test_ignore_deleted_whitespaces",
"tests/test_repository_mining.py::test_ignore_add_whitespaces_and_changed_file",
"tests/test_repository_mining.py::test_projectname_multiple_repos",
"tests/test_repository_mining.py::test_projectname_multiple_repos_remote"
] | [
"tests/test_repository_mining.py::test_no_url",
"tests/test_repository_mining.py::test_badly_formatted_repo_url",
"tests/test_repository_mining.py::test_malformed_url",
"tests/test_repository_mining.py::test_simple_remote_url[repo_to0-159]",
"tests/test_repository_mining.py::test_two_remote_urls[repo_to0-518]",
"tests/test_repository_mining.py::test_badly_formatted_url",
"tests/test_repository_mining.py::test_clone_repo_to",
"tests/test_repository_mining.py::test_clone_repo_to_not_existing",
"tests/test_repository_mining.py::test_clone_repo_to_repeated",
"tests/test_repository_mining.py::test_deletion_remotes"
] | [] | Apache License 2.0 | 12,202 | 826 | [
"pydriller/git.py",
"pydriller/repository.py"
] |
|
sqlfluff__sqlfluff-2625 | d44c83e7cee923869e3ca6149da4e6d1ad0286eb | 2022-02-13 13:24:21 | 9dc33cf6520f0374385a305c1ae1f211b2db7afc | codecov[bot]: # [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) Report
> Merging [#2625](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (5a7bd2d) into [main](https://codecov.io/gh/sqlfluff/sqlfluff/commit/088c048ebb3cac7fdde6fd0606975d024713abb0?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (088c048) will **not change** coverage.
> The diff coverage is `100.00%`.
> :exclamation: Current head 5a7bd2d differs from pull request most recent head bf864c5. Consider uploading reports for the commit bf864c5 to get more accurate results
[](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
```diff
@@ Coverage Diff @@
## main #2625 +/- ##
=========================================
Coverage 100.00% 100.00%
=========================================
Files 163 163
Lines 11900 11914 +14
=========================================
+ Hits 11900 11914 +14
```
| [Impacted Files](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) | Coverage Δ | |
|---|---|---|
| [src/sqlfluff/cli/commands.py](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff#diff-c3JjL3NxbGZsdWZmL2NsaS9jb21tYW5kcy5weQ==) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Last update [088c048...bf864c5](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2625?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
| diff --git a/src/sqlfluff/cli/commands.py b/src/sqlfluff/cli/commands.py
index f92f71dd3..a0276532b 100644
--- a/src/sqlfluff/cli/commands.py
+++ b/src/sqlfluff/cli/commands.py
@@ -6,13 +6,7 @@ import json
import logging
import time
from logging import LogRecord
-from typing import (
- Callable,
- Tuple,
- NoReturn,
- Optional,
- List,
-)
+from typing import Callable, Tuple, NoReturn, Optional, List, cast
import yaml
@@ -418,6 +412,17 @@ def dialects(**kwargs) -> None:
click.echo(format_dialects(dialect_readout), color=c.get("color"))
+def dump_file_payload(filename: Optional[str], payload: str):
+ """Write the output file content to stdout or file."""
+ # If there's a file specified to write to, write to it.
+ if filename:
+ with open(filename, "w") as out_file:
+ out_file.write(payload)
+ # Otherwise write to stdout
+ else:
+ click.echo(payload)
+
+
@cli.command()
@common_options
@core_options
@@ -429,6 +434,14 @@ def dialects(**kwargs) -> None:
type=click.Choice([ft.value for ft in FormatType], case_sensitive=False),
help="What format to return the lint result in (default=human).",
)
[email protected](
+ "--write-output",
+ help=(
+ "Optionally provide a filename to write the results to, mostly used in "
+ "tandem with --format. NB: Setting an output file re-enables normal "
+ "stdout logging."
+ ),
+)
@click.option(
"--annotation-level",
default="notice",
@@ -468,6 +481,7 @@ def lint(
paths: Tuple[str],
processes: int,
format: str,
+ write_output: Optional[str],
annotation_level: str,
nofail: bool,
disregard_sqlfluffignores: bool,
@@ -497,7 +511,8 @@ def lint(
"""
config = get_config(extra_config_path, ignore_local_config, **kwargs)
- non_human_output = format != FormatType.human.value
+ non_human_output = (format != FormatType.human.value) or (write_output is not None)
+ file_output = None
lnt, formatter = get_linter_and_formatter(config, silent=non_human_output)
verbose = config.get("verbose")
@@ -535,9 +550,9 @@ def lint(
click.echo(format_linting_stats(result, verbose=verbose))
if format == FormatType.json.value:
- click.echo(json.dumps(result.as_records()))
+ file_output = json.dumps(result.as_records())
elif format == FormatType.yaml.value:
- click.echo(yaml.dump(result.as_records(), sort_keys=False))
+ file_output = yaml.dump(result.as_records(), sort_keys=False)
elif format == FormatType.github_annotation.value:
github_result = []
for record in result.as_records():
@@ -558,7 +573,10 @@ def lint(
"annotation_level": annotation_level,
}
)
- click.echo(json.dumps(github_result))
+ file_output = json.dumps(github_result)
+
+ if file_output:
+ dump_file_payload(write_output, cast(str, file_output))
if bench:
click.echo("==== overall timings ====")
@@ -876,6 +894,14 @@ def quoted_presenter(dumper, data):
),
help="What format to return the parse result in.",
)
+@click.option(
+ "--write-output",
+ help=(
+ "Optionally provide a filename to write the results to, mostly used in "
+ "tandem with --format. NB: Setting an output file re-enables normal "
+ "stdout logging."
+ ),
+)
@click.option(
"--profiler", is_flag=True, help="Set this flag to engage the python profiler."
)
@@ -892,6 +918,7 @@ def parse(
code_only: bool,
include_meta: bool,
format: str,
+ write_output: Optional[str],
profiler: bool,
bench: bool,
nofail: bool,
@@ -909,7 +936,8 @@ def parse(
"""
c = get_config(extra_config_path, ignore_local_config, **kwargs)
# We don't want anything else to be logged if we want json or yaml output
- non_human_output = format in (FormatType.json.value, FormatType.yaml.value)
+ # unless we're writing to a file.
+ non_human_output = (format != FormatType.human.value) or (write_output is not None)
lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
verbose = c.get("verbose")
recurse = c.get("recurse")
@@ -975,9 +1003,12 @@ def parse(
# For yaml dumping always dump double quoted strings if they contain
# tabs or newlines.
yaml.add_representer(str, quoted_presenter)
- click.echo(yaml.dump(parsed_strings_dict, sort_keys=False))
+ file_output = yaml.dump(parsed_strings_dict, sort_keys=False)
elif format == FormatType.json.value:
- click.echo(json.dumps(parsed_strings_dict))
+ file_output = json.dumps(parsed_strings_dict)
+
+ # Dump the output to stdout or to file as appropriate.
+ dump_file_payload(write_output, file_output)
except OSError: # pragma: no cover
click.echo(
| Suppress dbt logs and warnings when using --format github-annotation
Sometimes, running:
```
sqlfluff lint --format github-annotation --annotation-level failure --nofail
```
Can result in the first couple of output lines being logs which break the annotations, for example:
```
14:21:42 Partial parse save file not found. Starting full parse.
Warning: [WARNING]: Did not find matching node for patch with name 'xxxx' in the 'models' section of file 'models/production/xxxxx/xxxxx.yml'
```
## Version
dbt 1.0.0, SQLFluff 0.9.0
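For context, the patch above addresses this by adding a `--write-output` option and a `dump_file_payload` helper, so machine-readable output can bypass stdout entirely. Below is a minimal standalone sketch of that write-to-file-or-stdout pattern; the helper mirrors the one in the diff, but the payload contents and file name are invented for illustration.
```python
import json
from typing import Optional

import click


def dump_file_payload(filename: Optional[str], payload: str) -> None:
    """Write the serialized output to a file if one was given, else to stdout."""
    if filename:
        # Sending machine-readable output to a file keeps it clear of any log
        # lines (e.g. dbt warnings) that land on stdout during linting.
        with open(filename, "w") as out_file:
            out_file.write(payload)
    else:
        click.echo(payload)


# Hypothetical usage: the record below is made up, not real sqlfluff output.
payload = json.dumps([{"file": "models/my_model.sql", "line": 2, "code": "L010"}])
dump_file_payload("annotations.json", payload)  # clean file for a CI annotation step
dump_file_payload(None, payload)                # or fall back to stdout
```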
| sqlfluff/sqlfluff | diff --git a/test/cli/commands_test.py b/test/cli/commands_test.py
index 668beaaca..522109509 100644
--- a/test/cli/commands_test.py
+++ b/test/cli/commands_test.py
@@ -798,19 +798,35 @@ def test__cli__command__fix_no_force(rule, fname, prompt, exit_code, fix_exit_co
@pytest.mark.parametrize("serialize", ["yaml", "json"])
-def test__cli__command_parse_serialize_from_stdin(serialize):
+@pytest.mark.parametrize("write_file", [None, "outfile"])
+def test__cli__command_parse_serialize_from_stdin(serialize, write_file, tmp_path):
"""Check that the parser serialized output option is working.
+ This tests both output to stdout and output to file.
+
Not going to test for the content of the output as that is subject to change.
"""
+ cmd_args = ("-", "--format", serialize)
+
+ if write_file:
+ target_file = os.path.join(tmp_path, write_file + "." + serialize)
+ cmd_args += ("--write-output", target_file)
+
result = invoke_assert_code(
- args=[parse, ("-", "--format", serialize)],
+ args=[parse, cmd_args],
cli_input="select * from tbl",
)
+
+ if write_file:
+ with open(target_file, "r") as payload_file:
+ result_payload = payload_file.read()
+ else:
+ result_payload = result.output
+
if serialize == "json":
- result = json.loads(result.output)
+ result = json.loads(result_payload)
elif serialize == "yaml":
- result = yaml.safe_load(result.output)
+ result = yaml.safe_load(result_payload)
else:
raise Exception
result = result[0] # only one file
@@ -880,24 +896,42 @@ def test__cli__command_fail_nice_not_found(command):
@pytest.mark.parametrize("serialize", ["yaml", "json", "github-annotation"])
-def test__cli__command_lint_serialize_multiple_files(serialize):
- """Check the general format of JSON output for multiple files."""
+@pytest.mark.parametrize("write_file", [None, "outfile"])
+def test__cli__command_lint_serialize_multiple_files(serialize, write_file, tmp_path):
+ """Check the general format of JSON output for multiple files.
+
+ This tests runs both stdout checking and file checking.
+ """
fpath = "test/fixtures/linter/indentation_errors.sql"
+ cmd_args = (fpath, fpath, "--format", serialize, "--disable_progress_bar")
+
+ if write_file:
+ target_file = os.path.join(
+ tmp_path, write_file + (".yaml" if serialize == "yaml" else ".json")
+ )
+ cmd_args += ("--write-output", target_file)
+
# note the file is in here twice. two files = two payloads.
result = invoke_assert_code(
- args=[lint, (fpath, fpath, "--format", serialize, "--disable_progress_bar")],
+ args=[lint, cmd_args],
ret_code=65,
)
+ if write_file:
+ with open(target_file, "r") as payload_file:
+ result_payload = payload_file.read()
+ else:
+ result_payload = result.output
+
if serialize == "json":
- result = json.loads(result.output)
+ result = json.loads(result_payload)
assert len(result) == 2
elif serialize == "yaml":
- result = yaml.safe_load(result.output)
+ result = yaml.safe_load(result_payload)
assert len(result) == 2
elif serialize == "github-annotation":
- result = json.loads(result.output)
+ result = json.loads(result_payload)
filepaths = {r["file"] for r in result}
assert len(filepaths) == 1
else:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs==25.3.0
black==25.1.0
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==6.2
diff_cover==9.2.4
doc8==1.1.2
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
flake8==7.2.0
flake8-black==0.3.6
flake8-docstrings==1.7.0
hypothesis==6.130.5
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
mypy==1.15.0
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
pbr==6.1.1
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pydocstyle==6.3.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
pytest-cov==5.0.0
pytest-sugar==1.0.0
pytest-xdist==3.6.1
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
restructuredtext_lint==1.4.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
-e git+https://github.com/sqlfluff/sqlfluff.git@d44c83e7cee923869e3ca6149da4e6d1ad0286eb#egg=sqlfluff
stevedore==5.4.1
tblib==3.0.0
termcolor==2.5.0
toml==0.10.2
tomli==2.2.1
tqdm==4.67.1
types-appdirs==1.4.3.5
types-chardet==5.0.4.6
types-colorama==0.4.15.20240311
types-PyYAML==6.0.12.20250326
types-regex==2024.11.6.20250318
types-setuptools==78.1.0.20250329
types-toml==0.10.8.20240310
typing_extensions==4.13.0
urllib3==2.3.0
yamllint==1.37.0
| name: sqlfluff
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- attrs==25.3.0
- black==25.1.0
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==6.2
- diff-cover==9.2.4
- doc8==1.1.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- flake8==7.2.0
- flake8-black==0.3.6
- flake8-docstrings==1.7.0
- hypothesis==6.130.5
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- pbr==6.1.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pydocstyle==6.3.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==5.0.0
- pytest-sugar==1.0.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- regex==2024.11.6
- requests==2.32.3
- restructuredtext-lint==1.4.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- stevedore==5.4.1
- tblib==3.0.0
- termcolor==2.5.0
- toml==0.10.2
- tomli==2.2.1
- tqdm==4.67.1
- types-appdirs==1.4.3.5
- types-chardet==5.0.4.6
- types-colorama==0.4.15.20240311
- types-pyyaml==6.0.12.20250326
- types-regex==2024.11.6.20250318
- types-setuptools==78.1.0.20250329
- types-toml==0.10.8.20240310
- typing-extensions==4.13.0
- urllib3==2.3.0
- yamllint==1.37.0
prefix: /opt/conda/envs/sqlfluff
| [
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[outfile-yaml]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[outfile-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-yaml]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-github-annotation]"
] | [] | [
"test/cli/commands_test.py::test__cli__command_directed",
"test/cli/commands_test.py::test__cli__command_dialect",
"test/cli/commands_test.py::test__cli__command_dialect_legacy",
"test/cli/commands_test.py::test__cli__command_extra_config_fail",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command0]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command1]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command2]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command3]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command0]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command1]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command2]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command3]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command4]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command5]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command6]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command7]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command8]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command9]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command10]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command11]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command12]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command13]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command14]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command15]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command16]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command17]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command18]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command19]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command20]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command21]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command22]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command23]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command0-1]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command1-1]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command2-1]",
"test/cli/commands_test.py::test__cli__command_lint_warning_explicit_file_ignored",
"test/cli/commands_test.py::test__cli__command_lint_skip_ignore_files",
"test/cli/commands_test.py::test__cli__command_lint_ignore_local_config",
"test/cli/commands_test.py::test__cli__command_versioning",
"test/cli/commands_test.py::test__cli__command_version",
"test/cli/commands_test.py::test__cli__command_rules",
"test/cli/commands_test.py::test__cli__command_dialects",
"test/cli/commands_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L003-test/fixtures/linter/indentation_error_hard.sql]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_templating_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_suppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[0_lint_errors_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[0_lint_errors_1_suppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_parse_error_FIX_EVEN_UNPARSABLE]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[2_files_with_lint_errors_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__command_fix_stdin[select",
"test/cli/commands_test.py::test__cli__command_fix_stdin[",
"test/cli/commands_test.py::test__cli__command_fix_stdin[SELECT",
"test/cli/commands_test.py::test__cli__command_fix_stdin_logging_to_stderr",
"test/cli/commands_test.py::test__cli__command_fix_stdin_safety",
"test/cli/commands_test.py::test__cli__command_fix_stdin_error_exit_code[create",
"test/cli/commands_test.py::test__cli__command_fix_stdin_error_exit_code[select",
"test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-y-0-0]",
"test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-n-65-1]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[None-yaml]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[None-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[select",
"test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[SElect",
"test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command0]",
"test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command1]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-yaml]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-github-annotation]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_github_annotation",
"test/cli/commands_test.py::test___main___help",
"test/cli/commands_test.py::test_encoding[utf-8-ascii]",
"test/cli/commands_test.py::test_encoding[utf-8-sig-UTF-8-SIG]",
"test/cli/commands_test.py::test_encoding[utf-32-UTF-32]",
"test/cli/commands_test.py::test_cli_pass_on_correct_encoding_argument",
"test/cli/commands_test.py::test_cli_fail_on_wrong_encoding_argument",
"test/cli/commands_test.py::test_cli_no_disable_noqa_flag",
"test/cli/commands_test.py::test_cli_disable_noqa_flag",
"test/cli/commands_test.py::test_cli_get_default_config",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_disabled_progress_bar",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar_multiple_paths",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar_multiple_files",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_disabled_progress_bar_when_verbose_mode"
] | [] | MIT License | 12,215 | 1,294 | [
"src/sqlfluff/cli/commands.py"
] |
sqlfluff__sqlfluff-2628 | 13f732de1a08ea7a6437392ea162dba016acb8fd | 2022-02-13 19:27:38 | 9dc33cf6520f0374385a305c1ae1f211b2db7afc | codecov[bot]: # [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) Report
> Merging [#2628](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (e169a0e) into [main](https://codecov.io/gh/sqlfluff/sqlfluff/commit/13f732de1a08ea7a6437392ea162dba016acb8fd?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (13f732d) will **decrease** coverage by `0.01%`.
> The diff coverage is `100.00%`.
[](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
```diff
@@ Coverage Diff @@
## main #2628 +/- ##
===========================================
- Coverage 100.00% 99.98% -0.02%
===========================================
Files 163 163
Lines 11915 11923 +8
===========================================
+ Hits 11915 11921 +6
- Misses 0 2 +2
```
| [Impacted Files](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) | Coverage Δ | |
|---|---|---|
| [src/sqlfluff/core/linter/linter.py](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff#diff-c3JjL3NxbGZsdWZmL2NvcmUvbGludGVyL2xpbnRlci5weQ==) | `99.48% <100.00%> (-0.52%)` | :arrow_down: |
| [src/sqlfluff/core/rules/base.py](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff#diff-c3JjL3NxbGZsdWZmL2NvcmUvcnVsZXMvYmFzZS5weQ==) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Last update [13f732d...e169a0e](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2628?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
barrywhart: @juhoautio: Given there is work remaining to do on #2624 (i.e. rules that are currently causing the additional lint check to fail), I think your suggestion about the linter loop should be a separate issue after all. It's a great idea, but we shouldn't add it while there are still known issues. | diff --git a/src/sqlfluff/core/linter/linter.py b/src/sqlfluff/core/linter/linter.py
index eaaaf8a7f..f86d936a3 100644
--- a/src/sqlfluff/core/linter/linter.py
+++ b/src/sqlfluff/core/linter/linter.py
@@ -483,6 +483,7 @@ class Linter:
else:
ignore_buff = []
+ save_tree = tree
for loop in range(loop_limit):
changed = False
@@ -542,8 +543,31 @@ class Linter:
"loops."
)
break
- if fix and loop + 1 == loop_limit:
- linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].")
+ else:
+ if fix:
+ # The linter loop hit the limit before reaching a stable point
+ # (i.e. free of lint errors). If this happens, it's usually
+ # because one or more rules produced fixes which did not address
+ # the original issue **or** created new issues.
+ linter_logger.warning(f"Loop limit on fixes reached [{loop_limit}].")
+
+ # Discard any fixes for the linting errors, since they caused a
+ # loop. IMPORTANT: By doing this, we are telling SQLFluff that
+ # these linting errors are "unfixable". This is important,
+ # because when "sqlfluff fix" encounters unfixable lint errors,
+ # it exits with a "failure" exit code, which is exactly what we
+ # want in this situation. (Reason: Although this is more of an
+ # internal SQLFluff issue, users deserve to know about it,
+ # because it means their file(s) weren't fixed.
+ for violation in initial_linting_errors:
+ if isinstance(violation, SQLLintError):
+ violation.fixes = []
+
+ # Return the original parse tree, before any fixes were applied.
+ # Reason: When the linter hits the loop limit, the file is often
+ # messy, e.g. some of the fixes were applied repeatedly, possibly
+ # other weird things. We don't want the user to see this junk!
+ return save_tree, initial_linting_errors, ignore_buff
if config.get("ignore_templated_areas", default=True):
initial_linting_errors = cls.remove_templated_errors(initial_linting_errors)
diff --git a/src/sqlfluff/core/rules/base.py b/src/sqlfluff/core/rules/base.py
index 2706f9d79..fed7ff602 100644
--- a/src/sqlfluff/core/rules/base.py
+++ b/src/sqlfluff/core/rules/base.py
@@ -555,23 +555,8 @@ class BaseRule:
)
return vs, raw_stack, fixes, memory
- new_lerrs = []
- new_fixes = []
-
- def _process_lint_result(res):
- self.discard_unsafe_fixes(res, templated_file)
- lerr = res.to_linting_error(rule=self)
- ignored = False
- if lerr:
- if ignore_mask:
- filtered = LintedFile.ignore_masked_violations([lerr], ignore_mask)
- if not filtered:
- lerr = None
- ignored = True
- if lerr:
- new_lerrs.append(lerr)
- if not ignored:
- new_fixes.extend(res.fixes)
+ new_lerrs: List[SQLLintError] = []
+ new_fixes: List[LintFix] = []
if res is None:
# Assume this means no problems (also means no memory)
@@ -579,7 +564,9 @@ class BaseRule:
elif isinstance(res, LintResult):
# Extract any memory
memory = res.memory
- _process_lint_result(res)
+ self._process_lint_result(
+ res, templated_file, ignore_mask, new_lerrs, new_fixes
+ )
elif isinstance(res, list) and all(
isinstance(elem, LintResult) for elem in res
):
@@ -587,7 +574,9 @@ class BaseRule:
# it was the last to be added
memory = res[-1].memory
for elem in res:
- _process_lint_result(elem)
+ self._process_lint_result(
+ elem, templated_file, ignore_mask, new_lerrs, new_fixes
+ )
else: # pragma: no cover
raise TypeError(
"Got unexpected result [{!r}] back from linting rule: {!r}".format(
@@ -629,6 +618,23 @@ class BaseRule:
# HELPER METHODS --------
+ def _process_lint_result(
+ self, res, templated_file, ignore_mask, new_lerrs, new_fixes
+ ):
+ self.discard_unsafe_fixes(res, templated_file)
+ lerr = res.to_linting_error(rule=self)
+ ignored = False
+ if lerr:
+ if ignore_mask:
+ filtered = LintedFile.ignore_masked_violations([lerr], ignore_mask)
+ if not filtered:
+ lerr = None
+ ignored = True
+ if lerr:
+ new_lerrs.append(lerr)
+ if not ignored:
+ new_fixes.extend(res.fixes)
+
@cached_property
def indent(self) -> str:
"""String for a single indent, based on configuration."""
| When linter loop limit is hit, should report an error/exception
When the loop limit is hit, `sqlfluff fix` logs a warning:
```
Loop limit on fixes reached
```
This is a pretty "quiet" notification that can mask issues. Suggest we change this:
* Increase the default loop limit from 10 to 25 iterations.
* Log an ERROR and/or raise an exception. If an exception, suggest (maybe?) raising one that is caught and included in the list of lint issues, not one that stops the `fix` run. Open to other ideas, but I think this is the right tradeoff between raising visibility of what is _likely_ an infinite loop in the linter run while still giving the user useful `fix` output.
Related to #1344, among others.
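The patch above settles on exactly this trade-off: when the loop limit is hit, the linter warns, strips the fixes from the looping violations (so `fix` exits with a failure code), and returns the original parse tree. The toy below is a self-contained illustration of that behaviour — `run_rule`, `fix_until_stable` and the counter are invented stand-ins, not sqlfluff APIs — and mirrors the mocked-crawl test further down, which forces a never-stabilising fix.
```python
import itertools
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("fix_loop_demo")

LOOP_LIMIT = 10
_counter = itertools.count(1)


def run_rule(sql: str) -> str:
    """Toy rule that keeps proposing a new, different 'fix', so it never stabilises."""
    return f"-- Comment {next(_counter)}\nSELECT 1 FROM foo"


def fix_until_stable(original: str) -> str:
    current = original
    for _ in range(LOOP_LIMIT):
        fixed = run_rule(current)
        if fixed == current:
            return fixed  # reached a stable point within the limit
        current = fixed
    # Loop limit hit: warn loudly and hand back the *original* input rather
    # than a half-fixed file, treating the looping violations as unfixable.
    logger.warning("Loop limit on fixes reached [%s].", LOOP_LIMIT)
    return original


print(fix_until_stable("-- Comment A\nSELECT 1 FROM foo"))  # prints the original text
```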
| sqlfluff/sqlfluff | diff --git a/test/cli/commands_test.py b/test/cli/commands_test.py
index 522109509..1104e87b9 100644
--- a/test/cli/commands_test.py
+++ b/test/cli/commands_test.py
@@ -21,6 +21,8 @@ from click.testing import CliRunner
# We import the library directly here to get the version
import sqlfluff
from sqlfluff.cli.commands import lint, version, rules, fix, parse, dialects, get_config
+from sqlfluff.core.rules.base import BaseRule, LintFix, LintResult
+from sqlfluff.core.parser.segments.raw import CommentSegment
def invoke_assert_code(
@@ -670,6 +672,66 @@ def test__cli__fix_error_handling_behavior(sql, fix_args, fixed, exit_code, tmpd
assert not fixed_path.is_file()
+_old_crawl = BaseRule.crawl
+_fix_counter = 0
+
+
+def _mock_crawl(rule, segment, ignore_mask, templated_file=None, *args, **kwargs):
+ # For test__cli__fix_loop_limit_behavior, we mock BaseRule.crawl(),
+ # replacing it with this function. This function generates an infinite
+ # sequence of fixes without ever repeating the same fix. This causes the
+ # linter to hit the loop limit, allowing us to test that behavior.
+ if segment.is_type("comment") and "Comment" in segment.raw:
+ global _fix_counter
+ _fix_counter += 1
+ fix = LintFix.replace(segment, [CommentSegment(f"-- Comment {_fix_counter}")])
+ result = LintResult(segment, fixes=[fix])
+ errors = []
+ fixes = []
+ rule._process_lint_result(result, templated_file, ignore_mask, errors, fixes)
+ return (
+ errors,
+ None,
+ fixes,
+ None,
+ )
+ else:
+ return _old_crawl(
+ rule, segment, ignore_mask, templated_file=templated_file, *args, **kwargs
+ )
+
+
+@pytest.mark.parametrize(
+ "sql, exit_code",
+ [
+ ("-- Comment A\nSELECT 1 FROM foo", 1),
+ ("-- noqa: disable=all\n-- Comment A\nSELECT 1 FROM foo", 0),
+ ],
+)
+@patch("sqlfluff.core.rules.base.BaseRule.crawl", _mock_crawl)
+def test__cli__fix_loop_limit_behavior(sql, exit_code, tmpdir):
+ """Tests how "fix" behaves when the loop limit is exceeded."""
+ fix_args = ["--force", "--fixed-suffix", "FIXED", "--rules", "L001"]
+ tmp_path = pathlib.Path(str(tmpdir))
+ filepath = tmp_path / "testing.sql"
+ filepath.write_text(textwrap.dedent(sql))
+ with tmpdir.as_cwd():
+ with pytest.raises(SystemExit) as e:
+ fix(
+ fix_args
+ + [
+ "-f",
+ ]
+ )
+ assert exit_code == e.value.code
+ # In both parametrized test cases, no output file should have been
+ # created.
+ # - Case #1: Hitting the loop limit is an error
+ # - Case #2: "noqa" suppressed all lint errors, thus no fixes applied
+ fixed_path = tmp_path / "testingFIXED.sql"
+ assert not fixed_path.is_file()
+
+
# Test case disabled because there isn't a good example of where to test this.
# This *should* test the case where a rule DOES have a proposed fix, but for
# some reason when we try to apply it, there's a failure.
diff --git a/test/core/rules/config_test.py b/test/core/rules/config_test.py
index 35580643c..e3674fbc5 100644
--- a/test/core/rules/config_test.py
+++ b/test/core/rules/config_test.py
@@ -56,9 +56,8 @@ def test__rules__runaway_fail_catch():
# In theory this step should result in an infinite
# loop, but the loop limit should catch it.
linted = linter.lint_string(my_query, fix=True)
- # We should have a lot of newlines in there.
- # The number should equal the runaway limit
- assert linted.tree.raw.count("\n") == runaway_limit
+ # When the linter hits the runaway limit, it returns the original SQL tree.
+ assert linted.tree.raw == my_query
def test_rules_cannot_be_instantiated_without_declared_configs():
diff --git a/test/fixtures/rules/std_rule_cases/L019.yml b/test/fixtures/rules/std_rule_cases/L019.yml
index 32bb81101..6f842aa68 100644
--- a/test/fixtures/rules/std_rule_cases/L019.yml
+++ b/test/fixtures/rules/std_rule_cases/L019.yml
@@ -230,10 +230,10 @@ trailing_comma_move_past_several_comment_lines:
global_actions_states
configs:
core:
- # Set runaway_limit=1 to verify the fix only requires one pass. In an
+ # Set runaway_limit=2 to verify the fix only requires one pass. In an
# earlier version, the comma before "SAFE_DIVIDE()" was being moved one
# line per pass. Too lazy!
- runaway_limit: 1
+ runaway_limit: 2
leading_comma_move_past_several_comment_lines:
@@ -267,10 +267,10 @@ leading_comma_move_past_several_comment_lines:
global_actions_states
configs:
core:
- # Set runaway_limit=1 to verify the fix only requires one pass. In an
+ # Set runaway_limit=2 to verify the fix only requires one pass. In an
# earlier version for the trailing comma case, commas were being moved
# "through" comment blocks one line per pass. Too lazy!
- runaway_limit: 1
+ runaway_limit: 2
rules:
L019:
comma_style: leading
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
chardet==5.2.0
click==8.1.8
colorama==0.4.6
diff_cover==9.2.4
exceptiongroup==1.2.2
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pathspec==0.12.1
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
PyYAML==6.0.2
regex==2024.11.6
-e git+https://github.com/sqlfluff/sqlfluff.git@13f732de1a08ea7a6437392ea162dba016acb8fd#egg=sqlfluff
tblib==3.0.0
toml==0.10.2
tomli==2.2.1
tqdm==4.67.1
typing_extensions==4.13.0
| name: sqlfluff
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- chardet==5.2.0
- click==8.1.8
- colorama==0.4.6
- diff-cover==9.2.4
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pathspec==0.12.1
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pyyaml==6.0.2
- regex==2024.11.6
- tblib==3.0.0
- toml==0.10.2
- tomli==2.2.1
- tqdm==4.67.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/sqlfluff
| [
"test/cli/commands_test.py::test__cli__fix_loop_limit_behavior[--",
"test/core/rules/config_test.py::test__rules__runaway_fail_catch"
] | [] | [
"test/cli/commands_test.py::test__cli__command_directed",
"test/cli/commands_test.py::test__cli__command_dialect",
"test/cli/commands_test.py::test__cli__command_dialect_legacy",
"test/cli/commands_test.py::test__cli__command_extra_config_fail",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command0]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command1]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command2]",
"test/cli/commands_test.py::test__cli__command_lint_stdin[command3]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command0]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command1]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command2]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command3]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command4]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command5]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command6]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command7]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command8]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command9]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command10]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command11]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command12]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command13]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command14]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command15]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command16]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command17]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command18]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command19]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command20]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command21]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command22]",
"test/cli/commands_test.py::test__cli__command_lint_parse[command23]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command0-1]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command1-1]",
"test/cli/commands_test.py::test__cli__command_lint_parse_with_retcode[command2-1]",
"test/cli/commands_test.py::test__cli__command_lint_warning_explicit_file_ignored",
"test/cli/commands_test.py::test__cli__command_lint_skip_ignore_files",
"test/cli/commands_test.py::test__cli__command_lint_ignore_local_config",
"test/cli/commands_test.py::test__cli__command_versioning",
"test/cli/commands_test.py::test__cli__command_version",
"test/cli/commands_test.py::test__cli__command_rules",
"test/cli/commands_test.py::test__cli__command_dialects",
"test/cli/commands_test.py::test__cli__command__fix[L001-test/fixtures/linter/indentation_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/whitespace_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L008-test/fixtures/linter/indentation_errors.sql]",
"test/cli/commands_test.py::test__cli__command__fix[L003-test/fixtures/linter/indentation_error_hard.sql]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_templating_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_suppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[0_lint_errors_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[0_lint_errors_1_suppressed_parse_error]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[1_lint_error_1_unsuppressed_parse_error_FIX_EVEN_UNPARSABLE]",
"test/cli/commands_test.py::test__cli__fix_error_handling_behavior[2_files_with_lint_errors_1_unsuppressed_parse_error]",
"test/cli/commands_test.py::test__cli__command_fix_stdin[select",
"test/cli/commands_test.py::test__cli__command_fix_stdin[",
"test/cli/commands_test.py::test__cli__command_fix_stdin[SELECT",
"test/cli/commands_test.py::test__cli__command_fix_stdin_logging_to_stderr",
"test/cli/commands_test.py::test__cli__command_fix_stdin_safety",
"test/cli/commands_test.py::test__cli__command_fix_stdin_error_exit_code[create",
"test/cli/commands_test.py::test__cli__command_fix_stdin_error_exit_code[select",
"test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-y-0-0]",
"test/cli/commands_test.py::test__cli__command__fix_no_force[L001-test/fixtures/linter/indentation_errors.sql-n-65-1]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[None-yaml]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[None-json]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[outfile-yaml]",
"test/cli/commands_test.py::test__cli__command_parse_serialize_from_stdin[outfile-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[select",
"test/cli/commands_test.py::test__cli__command_lint_serialize_from_stdin[SElect",
"test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command0]",
"test/cli/commands_test.py::test__cli__command_fail_nice_not_found[command1]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-yaml]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[None-github-annotation]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-yaml]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-json]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_multiple_files[outfile-github-annotation]",
"test/cli/commands_test.py::test__cli__command_lint_serialize_github_annotation",
"test/cli/commands_test.py::test___main___help",
"test/cli/commands_test.py::test_encoding[utf-8-ascii]",
"test/cli/commands_test.py::test_encoding[utf-8-sig-UTF-8-SIG]",
"test/cli/commands_test.py::test_encoding[utf-32-UTF-32]",
"test/cli/commands_test.py::test_cli_pass_on_correct_encoding_argument",
"test/cli/commands_test.py::test_cli_fail_on_wrong_encoding_argument",
"test/cli/commands_test.py::test_cli_no_disable_noqa_flag",
"test/cli/commands_test.py::test_cli_disable_noqa_flag",
"test/cli/commands_test.py::test_cli_get_default_config",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_disabled_progress_bar",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar_multiple_paths",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_enabled_progress_bar_multiple_files",
"test/cli/commands_test.py::TestProgressBars::test_cli_lint_disabled_progress_bar_when_verbose_mode",
"test/core/rules/config_test.py::test__rules__user_rules",
"test/core/rules/config_test.py::test_rules_cannot_be_instantiated_without_declared_configs",
"test/core/rules/config_test.py::test_rules_configs_are_dynamically_documented",
"test/core/rules/config_test.py::test_rule_exception_is_caught_to_validation",
"test/core/rules/config_test.py::test_std_rule_import_fail_bad_naming",
"test/core/rules/config_test.py::test_rule_set_return_informative_error_when_rule_not_registered"
] | [] | MIT License | 12,218 | 1,283 | [
"src/sqlfluff/core/linter/linter.py",
"src/sqlfluff/core/rules/base.py"
] |
E3SM-Project__zstash-197 | 0b18fd20031b48b91f604b886fb66b666284de43 | 2022-02-15 00:25:53 | 4ad40629ec9f433318d680756dddbe7b61a3c7a5 | diff --git a/zstash/extract.py b/zstash/extract.py
index 56704d7..bc44ac7 100644
--- a/zstash/extract.py
+++ b/zstash/extract.py
@@ -232,7 +232,11 @@ def extract_database(
u"select * from files where name GLOB ? or tar GLOB ?",
(args_file, args_file),
)
- matches_ = matches_ + cur.fetchall()
+ match: List[TupleFilesRow] = cur.fetchall()
+ if match:
+ matches_ = matches_ + match
+ else:
+ logger.info("No matches for {}".format(args_file))
matches: List[FilesRow] = list(map(lambda match: FilesRow(match), matches_))
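For context, a minimal self-contained sketch of the query-then-log pattern the patch above introduces — an in-memory table stands in for the real zstash index database, and the file names are invented for illustration:
```python
import logging
import sqlite3

logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logger = logging.getLogger("extract_demo")

con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.execute("CREATE TABLE files (name TEXT, tar TEXT)")
cur.execute("INSERT INTO files VALUES ('file0.txt', '000000.tar')")

matches = []
for args_file in ("file1.txt", "file0.txt"):
    cur.execute(
        "SELECT * FROM files WHERE name GLOB ? OR tar GLOB ?",
        (args_file, args_file),
    )
    match = cur.fetchall()
    if match:
        matches += match
    else:
        # Tell the user explicitly that this pattern matched nothing,
        # instead of silently skipping it.
        logger.info("No matches for {}".format(args_file))

print(matches)  # [('file0.txt', '000000.tar')]
```
Running it logs `INFO: No matches for file1.txt` — the same string the new test below asserts on.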
| Improve Zstash extract warnings
Print a short explanation on the screen if the match doesn't return any files. | E3SM-Project/zstash | diff --git a/tests/test_extract.py b/tests/test_extract.py
index ff129c0..cdc9c62 100644
--- a/tests/test_extract.py
+++ b/tests/test_extract.py
@@ -24,13 +24,13 @@ class TestExtract(TestZstash):
# `zstash extract` is tested in TestExtract and TestExtractParallel.
# x = on, no mark = off, b = both on and off tested
- # option | ExtractVerbose | Extract | ExtractCache | ExtractTars | ExtractParallel | ExtractParallelTars |
- # --hpss |x|x|x|x|x|x|
- # --workers | | | | |x|x|
- # --cache | | |x| | | |
- # --keep | |x| | | | |
- # --tars | | | |x| |x|
- # -v |x| | | |b| |
+ # option | ExtractVerbose | Extract | ExtractCache | ExtractTars | ExtractFile | ExtractParallel | ExtractParallelTars |
+ # --hpss |x|x|x|x|x|x|x|
+ # --workers | | | | | |x|x|
+ # --cache | | |x| | | | |
+ # --keep | |x| | | | | |
+ # --tars | | | |x| | |x|
+ # -v |x| | | | |b| |
def helperExtractVerbose(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
"""
@@ -208,6 +208,42 @@ class TestExtract(TestZstash):
)
self.stop(error_message)
+ def helperExtractFile(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
+ """
+ Test `zstash extract` with a specific file.
+ """
+ self.hpss_path = hpss_path
+ use_hpss = self.setupDirs(test_name)
+ self.create(use_hpss, zstash_path)
+ self.assertWorkspace()
+ os.rename(self.test_dir, self.backup_dir)
+ os.mkdir(self.test_dir)
+ os.chdir(self.test_dir)
+ if not use_hpss:
+ shutil.copytree(
+ "{}/{}/{}".format(TOP_LEVEL, self.backup_dir, self.cache), self.copy_dir
+ )
+ cmd = "{}zstash extract --hpss={} file1.txt".format(zstash_path, self.hpss_path)
+ output, err = run_cmd(cmd)
+ os.chdir(TOP_LEVEL)
+ expected_present = [
+ "INFO: No matches for file1.txt",
+ ]
+ expected_absent = ["ERROR", "Not extracting"]
+ self.check_strings(cmd, output + err, expected_present, expected_absent)
+
+ os.chdir(self.test_dir)
+ cmd = "{}zstash extract --hpss={} file0.txt".format(zstash_path, self.hpss_path)
+ output, err = run_cmd(cmd)
+ os.chdir(TOP_LEVEL)
+ expected_present = [
+ "Extracting file0.txt",
+ ]
+ if use_hpss:
+ expected_present.append("Transferring file from HPSS")
+ expected_absent = ["ERROR", "Not extracting"]
+ self.check_strings(cmd, output + err, expected_present, expected_absent)
+
def testExtractVerbose(self):
self.helperExtractVerbose("testExtractVerbose", "none")
@@ -236,6 +272,13 @@ class TestExtract(TestZstash):
self.conditional_hpss_skip()
helperExtractTars(self, "testExtractTarsHPSS", HPSS_ARCHIVE)
+ def testExtractFile(self):
+ self.helperExtractFile("testExtractFile", "none")
+
+ def testExtractFileHPSS(self):
+ self.conditional_hpss_skip()
+ self.helperExtractFile("testExtractFileHPSS", HPSS_ARCHIVE)
+
def helperExtractTars(
tester, test_name, hpss_path, worker_str="", zstash_path=ZSTASH_PATH
diff --git a/tests/test_extract_parallel.py b/tests/test_extract_parallel.py
index bbbd360..c9dfd69 100644
--- a/tests/test_extract_parallel.py
+++ b/tests/test_extract_parallel.py
@@ -24,13 +24,13 @@ class TestExtractParallel(TestZstash):
# `zstash extract` is tested in TestExtract and TestExtractParallel.
# x = on, no mark = off, b = both on and off tested
- # option | ExtractVerbose | Extract | ExtractCache | ExtractTars | ExtractParallel | ExtractParallelTars |
- # --hpss |x|x|x|x|x|x|
- # --workers | | | | |x|x|
- # --cache | | |x| | | |
- # --keep | |x| | | | |
- # --tars | | | |x| |x|
- # -v |x| | | |b| |
+ # option | ExtractVerbose | Extract | ExtractCache | ExtractTars | ExtractFile | ExtractParallel | ExtractParallelTars |
+ # --hpss |x|x|x|x|x|x|x|
+ # --workers | | | | | |x|x|
+ # --cache | | |x| | | | |
+ # --keep | |x| | | | | |
+ # --tars | | | |x| | |x|
+ # -v |x| | | | |b| |
def helperExtractParallel(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": [
"conda/dev.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7.10",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1673645646525/work
appdirs @ file:///home/conda/feedstock_root/build_artifacts/appdirs_1603108395799/work
attrs==19.3.0
Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1702422572539/work
black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1599478779128/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1666183775483/work
cfgv @ file:///home/conda/feedstock_root/build_artifacts/cfgv_1629909281805/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work
cli-ui==0.10.3
click @ file:///home/conda/feedstock_root/build_artifacts/click_1651215140632/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
coverage==7.2.7
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography_1666563371538/work
dataclasses @ file:///home/conda/feedstock_root/build_artifacts/dataclasses_1628958434797/work
distlib @ file:///home/conda/feedstock_root/build_artifacts/distlib_1728557174656/work
docopt==0.6.2
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1610127754852/work
exceptiongroup==1.2.2
execnet==2.0.2
fair-research-login @ file:///home/conda/feedstock_root/build_artifacts/fair-research-login_1606879693149/work
filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1726613473834/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1601874335748/work
flake8-isort @ file:///home/conda/feedstock_root/build_artifacts/flake8-isort_1623224733929/work
globus-sdk @ file:///home/conda/feedstock_root/build_artifacts/globus-sdk_1612297651787/work
identify @ file:///home/conda/feedstock_root/build_artifacts/identify_1732589372185/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1653252814274/work
iniconfig==2.0.0
isort @ file:///home/conda/feedstock_root/build_artifacts/isort_1675293796116/work
jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737551960/work
mccabe==0.6.1
mypy @ file:///home/conda/feedstock_root/build_artifacts/mypy_1602270162469/work
mypy-extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1649013329265/work
nodeenv @ file:///home/conda/feedstock_root/build_artifacts/nodeenv_1717585263558/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work
pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1702249949303/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1696272223550/work
pluggy==1.2.0
pre-commit @ file:///home/conda/feedstock_root/build_artifacts/pre-commit_1612792544638/work
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1666155398032/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1589305246696/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pyflakes==2.2.0
pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1700607939962/work
PyJWT==1.7.1
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1648857264451/work
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==4.1.0
pytest-mock==3.11.1
pytest-xdist==3.5.0
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757092905/work
regex @ file:///home/conda/feedstock_root/build_artifacts/regex_1663068393899/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1716354486713/work
schema==0.7.7
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1613491625878/work
sphinx-multiversion==0.2.4
sphinx-rtd-theme @ file:///home/conda/feedstock_root/build_artifacts/sphinx_rtd_theme_1609854399947/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1674487779667/work
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1675256494457/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1691604704163/work
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1649380998999/work
tabulate==0.8.10
tbump==6.3.2
testfixtures @ file:///home/conda/feedstock_root/build_artifacts/testfixtures_1646144015818/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
tomli==2.0.1
tomlkit==0.12.5
typed-ast @ file:///home/conda/feedstock_root/build_artifacts/typed-ast_1635853218843/work
typing-extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1688315532570/work
ukkonen @ file:///home/conda/feedstock_root/build_artifacts/ukkonen_1649407025308/work
Unidecode==1.3.8
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work
virtualenv @ file:///home/conda/feedstock_root/build_artifacts/virtualenv_1681949329741/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
-e git+https://github.com/E3SM-Project/zstash.git@0b18fd20031b48b91f604b886fb66b666284de43#egg=zstash
| name: zstash
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- alabaster=0.7.13=pyhd8ed1ab_0
- appdirs=1.4.4=pyh9f0ad1d_0
- babel=2.14.0=pyhd8ed1ab_0
- black=20.8b1=py_1
- brotli-python=1.0.9=py37hd23a5d3_7
- ca-certificates=2025.1.31=hbcca054_0
- certifi=2024.8.30=pyhd8ed1ab_0
- cffi=1.15.1=py37h43b0acd_1
- cfgv=3.3.1=pyhd8ed1ab_0
- charset-normalizer=3.4.0=pyhd8ed1ab_0
- click=8.1.3=py37h89c1867_0
- colorama=0.4.6=pyhd8ed1ab_0
- cryptography=38.0.2=py37h5994e8b_1
- dataclasses=0.8=pyhc8e2a94_3
- distlib=0.3.9=pyhd8ed1ab_0
- docutils=0.16=py37h89c1867_3
- fair-research-login=0.2.0=pyhd3deb0d_0
- filelock=3.16.1=pyhd8ed1ab_0
- flake8=3.8.4=py_0
- flake8-isort=4.0.0=pyhd8ed1ab_1
- globus-sdk=2.0.1=pyhd8ed1ab_0
- identify=2.6.3=pyhd8ed1ab_0
- idna=3.10=pyhd8ed1ab_0
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=4.11.4=py37h89c1867_0
- importlib_metadata=4.11.4=hd8ed1ab_0
- isort=5.11.5=pyhd8ed1ab_0
- jinja2=3.1.4=pyhd8ed1ab_0
- ld_impl_linux-64=2.43=h712a8e2_4
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgomp=14.2.0=h767d61c_2
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnsl=2.0.1=hd590300_0
- libsqlite=3.49.1=hee588c1_2
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libzlib=1.3.1=hb9d3cd8_2
- markupsafe=2.1.1=py37h540881e_1
- mccabe=0.6.1=py_1
- mypy=0.790=py_0
- mypy_extensions=0.4.3=py37h89c1867_5
- ncurses=6.5=h2d0b736_3
- nodeenv=1.9.1=pyhd8ed1ab_0
- openssl=3.4.1=h7b32b05_0
- packaging=23.2=pyhd8ed1ab_0
- pathspec=0.12.1=pyhd8ed1ab_0
- pip=21.0.1=pyhd8ed1ab_0
- platformdirs=3.11.0=pyhd8ed1ab_0
- pre-commit=2.10.1=py37h89c1867_0
- psutil=5.9.3=py37h540881e_0
- pycodestyle=2.6.0=pyh9f0ad1d_0
- pycparser=2.21=pyhd8ed1ab_0
- pyflakes=2.2.0=pyh9f0ad1d_0
- pygments=2.17.2=pyhd8ed1ab_0
- pyjwt=1.7.1=py_0
- pysocks=1.7.1=py37h89c1867_5
- python=3.7.10=hf930737_104_cpython
- python_abi=3.7=4_cp37m
- pytz=2024.2=pyhd8ed1ab_0
- pyyaml=6.0=py37h540881e_4
- readline=8.2=h8c095d6_2
- regex=2022.9.13=py37h540881e_0
- requests=2.32.2=pyhd8ed1ab_0
- setuptools=69.0.3=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- sphinx=3.5.1=pyhd8ed1ab_0
- sphinx_rtd_theme=0.5.1=pyhd3deb0d_0
- sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2
- sqlite=3.49.1=h9eae976_2
- testfixtures=6.18.5=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_0
- typed-ast=1.4.3=py37h5e8e339_1
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- ukkonen=1.0.1=py37h7cecad7_2
- urllib3=2.2.1=pyhd8ed1ab_0
- virtualenv=20.21.1=pyhd8ed1ab_0
- wheel=0.42.0=pyhd8ed1ab_0
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- zipp=3.15.0=pyhd8ed1ab_0
- pip:
- attrs==19.3.0
- cli-ui==0.10.3
- coverage==7.2.7
- docopt==0.6.2
- exceptiongroup==1.2.2
- execnet==2.0.2
- iniconfig==2.0.0
- pluggy==1.2.0
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==4.1.0
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- schema==0.7.7
- sphinx-multiversion==0.2.4
- tabulate==0.8.10
- tbump==6.3.2
- tomli==2.0.1
- tomlkit==0.12.5
- unidecode==1.3.8
prefix: /opt/conda/envs/zstash
| [
"tests/test_extract.py::TestExtract::testExtractFile"
] | [] | [
"tests/test_extract.py::TestExtract::testExtractCache",
"tests/test_extract.py::TestExtract::testExtractKeep",
"tests/test_extract.py::TestExtract::testExtractTars",
"tests/test_extract.py::TestExtract::testExtractVerbose",
"tests/test_extract_parallel.py::TestExtractParallel::testExtractParallel",
"tests/test_extract_parallel.py::TestExtractParallel::testExtractParallelTars"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,225 | 182 | [
"zstash/extract.py"
] |
|
pymc-devs__pymc-5476 | 2db28f0b2c0497124f118d7a27728a729c6ca8e0 | 2022-02-15 15:39:51 | c8525eb1ff771e8a60b0b5022a9938594f48dc18 | codecov[bot]: # [Codecov](https://codecov.io/gh/pymc-devs/pymc/pull/5476?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) Report
> Merging [#5476](https://codecov.io/gh/pymc-devs/pymc/pull/5476?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) (d82c05e) into [main](https://codecov.io/gh/pymc-devs/pymc/commit/3d958ad23a9663bef2e1752221587b0deb4739f1?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) (3d958ad) will **decrease** coverage by `36.94%`.
> The diff coverage is `0.00%`.
[](https://codecov.io/gh/pymc-devs/pymc/pull/5476?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs)
```diff
@@ Coverage Diff @@
## main #5476 +/- ##
===========================================
- Coverage 80.23% 43.28% -36.95%
===========================================
Files 82 81 -1
Lines 13941 14202 +261
===========================================
- Hits 11185 6148 -5037
- Misses 2756 8054 +5298
```
| [Impacted Files](https://codecov.io/gh/pymc-devs/pymc/pull/5476?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) | Coverage Δ | |
|---|---|---|
| [pymc/distributions/multivariate.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9kaXN0cmlidXRpb25zL211bHRpdmFyaWF0ZS5weQ==) | `27.61% <0.00%> (-63.75%)` | :arrow_down: |
| [pymc/step\_methods/mlda.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zdGVwX21ldGhvZHMvbWxkYS5weQ==) | `12.46% <0.00%> (-83.93%)` | :arrow_down: |
| [pymc/ode/utils.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9vZGUvdXRpbHMucHk=) | `17.85% <0.00%> (-82.15%)` | :arrow_down: |
| [pymc/smc/smc.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zbWMvc21jLnB5) | `17.99% <0.00%> (-79.91%)` | :arrow_down: |
| [pymc/bart/pgbart.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9iYXJ0L3BnYmFydC5weQ==) | `16.66% <0.00%> (-78.94%)` | :arrow_down: |
| [pymc/variational/updates.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy92YXJpYXRpb25hbC91cGRhdGVzLnB5) | `11.33% <0.00%> (-76.36%)` | :arrow_down: |
| [pymc/bart/utils.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9iYXJ0L3V0aWxzLnB5) | `6.52% <0.00%> (-76.09%)` | :arrow_down: |
| [pymc/gp/gp.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9ncC9ncC5weQ==) | `18.68% <0.00%> (-74.50%)` | :arrow_down: |
| [pymc/step\_methods/slicer.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zdGVwX21ldGhvZHMvc2xpY2VyLnB5) | `22.53% <0.00%> (-73.18%)` | :arrow_down: |
| [pymc/tuning/starting.py](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy90dW5pbmcvc3RhcnRpbmcucHk=) | `19.51% <0.00%> (-73.18%)` | :arrow_down: |
| ... and [57 more](https://codecov.io/gh/pymc-devs/pymc/pull/5476/diff?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) | |
markvrma: Added new test for higher dimensions.
I couldn't remove:
```py
if p.ndim > 1:
n = at.shape_padright(n)
if (p.ndim == 1) and (n.ndim > 0):
n = at.shape_padright(n)
p = at.shape_padleft(p)
```
as existing tests fail in `test_distributions_moments.py`.
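For readers following along, a small NumPy sketch (not from the original thread) of what the padding in the snippet above does: it adds a length-1 axis to the right of `n` so that `n` broadcasts against a batched `p` along the category axis. Shapes and values here are illustrative only.
```python
import numpy as np

p = np.array([[0.3, 0.6, 0.1],
              [0.5, 0.25, 0.25]])   # shape (2, 3): two batched probability vectors
n = np.array([10, 20])              # shape (2,): one total count per batch entry

# n[..., None] plays the role of at.shape_padright(n): (2,) -> (2, 1),
# so n * p broadcasts to shape (2, 3) without padding p on the left.
print(np.round(n[..., None] * p))   # -> [[ 3.  6.  1.], [10.  5.  5.]]
```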
ricardoV94: > Added new test for higher dimensions.
>
> I couldn't remove:
>
> ```python
> if p.ndim > 1:
> n = at.shape_padright(n)
> if (p.ndim == 1) and (n.ndim > 0):
> n = at.shape_padright(n)
> p = at.shape_padleft(p)
> ```
>
> as existing tests fail in `test_distributions_moments.py`.
We might need something else, instead of just removing these lines. Or perhaps those lines also give the right result for higher dimensions that are now supported, but I would be surprised if that was the case, since they seem to have specialized logic for <= 2D parameters.
I would brute-force with local tests where we try different combinations of `p` and `n` with shapes ranging from scalar to 3D and see if this function returns the correct output. One way might be to compare with a `np.vectorized` version of `get_moment` that works for the base case (1d vector of `p` and scalar `n`). There are some examples where we use this approach for testing the `logp` here:
https://github.com/pymc-devs/pymc/blob/c892317aba0663df0d250c1813b2ca2fe3460774/pymc/tests/test_distributions.py#L2228-L2251
and here:
https://github.com/pymc-devs/pymc/blob/c892317aba0663df0d250c1813b2ca2fe3460774/pymc/tests/test_distributions.py#L335-L347
https://github.com/pymc-devs/pymc/blob/c892317aba0663df0d250c1813b2ca2fe3460774/pymc/tests/test_distributions.py#L2297-L2319
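As a rough illustration of the brute-force check suggested above (not taken from the PR itself), one could loop the known-good base case — a 1-D `p` with a scalar `n` — over the batch and compare it with a single batched call. The shapes are arbitrary, and `get_moment` is assumed to be importable as in the patch further down.
```python
import itertools

import numpy as np
import pymc as pm
from pymc.distributions.distribution import get_moment

p = np.full((3, 2, 4), 0.25)                # batched probability vectors
n = np.array([[1, 10], [2, 20], [3, 30]])   # matching batch of totals

# One batched evaluation of the moment...
batched = get_moment(pm.Multinomial.dist(n=n, p=p)).eval()

# ...compared against the 1-D base case evaluated one batch entry at a time.
looped = np.empty_like(batched)
for idx in itertools.product(*(range(s) for s in n.shape)):
    looped[idx] = get_moment(pm.Multinomial.dist(n=n[idx], p=p[idx])).eval()

np.testing.assert_allclose(batched, looped)
```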
ricardoV94: Also did you figure out what exactly is failing? Looking at the DirichletMultinomial moment it seems like it should be exactly the same, except that here we already have the `p`, and can skip the first step of `p = a / at.sum(a, axis=-1)` in https://github.com/pymc-devs/pymc/pull/5225/files Unless there's an error in the implementation of the DirichletMultinomial moment as well
ricardoV94: @markvrma It seems that it was enough to add one dimension to the right of `n`. The old and new tests were quite helpful to figure this out, and also show that the `DirichletMultinomial` moment would fail for some configurations. | diff --git a/pymc/distributions/multivariate.py b/pymc/distributions/multivariate.py
index de500a335..d8d564533 100644
--- a/pymc/distributions/multivariate.py
+++ b/pymc/distributions/multivariate.py
@@ -56,7 +56,7 @@ from pymc.distributions.dist_math import (
logpow,
multigammaln,
)
-from pymc.distributions.distribution import Continuous, Discrete
+from pymc.distributions.distribution import Continuous, Discrete, get_moment
from pymc.distributions.shape_utils import (
broadcast_dist_samples_to,
rv_size_is_none,
@@ -558,11 +558,7 @@ class Multinomial(Discrete):
return super().dist([n, p], *args, **kwargs)
def get_moment(rv, size, n, p):
- if p.ndim > 1:
- n = at.shape_padright(n)
- if (p.ndim == 1) & (n.ndim > 0):
- n = at.shape_padright(n)
- p = at.shape_padleft(p)
+ n = at.shape_padright(n)
mode = at.round(n * p)
diff = n - at.sum(mode, axis=-1, keepdims=True)
inc_bool_arr = at.abs_(diff) > 0
@@ -682,21 +678,8 @@ class DirichletMultinomial(Discrete):
return super().dist([n, a], **kwargs)
def get_moment(rv, size, n, a):
- p = a / at.sum(a, axis=-1)
- mode = at.round(n * p)
- diff = n - at.sum(mode, axis=-1, keepdims=True)
- inc_bool_arr = at.abs_(diff) > 0
- mode = at.inc_subtensor(mode[inc_bool_arr.nonzero()], diff[inc_bool_arr.nonzero()])
-
- # Reshape mode according to dimensions implied by the parameters
- # This can include axes of length 1
- _, p_bcast = broadcast_params([n, p], ndims_params=[0, 1])
- mode = at.reshape(mode, p_bcast.shape)
-
- if not rv_size_is_none(size):
- output_size = at.concatenate([size, [p.shape[-1]]])
- mode = at.full(output_size, mode)
- return mode
+ p = a / at.sum(a, axis=-1, keepdims=True)
+ return get_moment(Multinomial.dist(n=n, p=p, size=size))
def logp(value, n, a):
"""
diff --git a/pymc/distributions/shape_utils.py b/pymc/distributions/shape_utils.py
index c761a2caf..5932f57d8 100644
--- a/pymc/distributions/shape_utils.py
+++ b/pymc/distributions/shape_utils.py
@@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, Optional, Sequence, Tuple, Union, cast
import numpy as np
-from aesara.graph.basic import Constant, Variable
+from aesara.graph.basic import Variable
from aesara.tensor.var import TensorVariable
from typing_extensions import TypeAlias
@@ -618,4 +618,4 @@ def find_size(
def rv_size_is_none(size: Variable) -> bool:
"""Check wether an rv size is None (ie., at.Constant([]))"""
- return isinstance(size, Constant) and size.data.size == 0
+ return size.type.shape == (0,)
| Generalize multinomial moment to arbitrary dimensions
The multinomial distribution is not restricted to 2D, but its moment assumes this is the case. This should be similar to https://github.com/pymc-devs/pymc/pull/5225 | pymc-devs/pymc | diff --git a/pymc/tests/test_distributions_moments.py b/pymc/tests/test_distributions_moments.py
index 0727963a9..433dda5a3 100644
--- a/pymc/tests/test_distributions_moments.py
+++ b/pymc/tests/test_distributions_moments.py
@@ -133,6 +133,9 @@ def test_rv_size_is_none():
rv = Normal.dist(0, 1, size=None)
assert rv_size_is_none(rv.owner.inputs[1])
+ rv = Normal.dist(0, 1, size=())
+ assert rv_size_is_none(rv.owner.inputs[1])
+
rv = Normal.dist(0, 1, size=1)
assert not rv_size_is_none(rv.owner.inputs[1])
@@ -1305,13 +1308,13 @@ def test_polyagamma_moment(h, z, size, expected):
np.array([[4, 6, 0, 0], [4, 2, 2, 2]]),
),
(
- np.array([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]]),
- np.array([1, 10]),
- None,
- np.array([[1, 0, 0, 0], [2, 3, 3, 2]]),
+ np.array([0.3, 0.6, 0.05, 0.05]),
+ np.array([2, 10]),
+ (1, 2),
+ np.array([[[1, 1, 0, 0], [4, 6, 0, 0]]]),
),
(
- np.array([0.26, 0.26, 0.26, 0.22]),
+ np.array([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]]),
np.array([1, 10]),
None,
np.array([[1, 0, 0, 0], [2, 3, 3, 2]]),
@@ -1319,8 +1322,8 @@ def test_polyagamma_moment(h, z, size, expected):
(
np.array([[0.25, 0.25, 0.25, 0.25], [0.26, 0.26, 0.26, 0.22]]),
np.array([1, 10]),
- (2, 2),
- np.full((2, 2, 4), [[1, 0, 0, 0], [2, 3, 3, 2]]),
+ (3, 2),
+ np.full((3, 2, 4), [[1, 0, 0, 0], [2, 3, 3, 2]]),
),
],
)
@@ -1467,10 +1470,16 @@ def test_lkjcholeskycov_moment(n, eta, size, expected):
(np.array([3, 6, 0.5, 0.5]), 2, None, np.array([1, 1, 0, 0])),
(np.array([30, 60, 5, 5]), 10, None, np.array([4, 6, 0, 0])),
(
- np.array([[26, 26, 26, 22]]), # Dim: 1 x 4
- np.array([[1], [10]]), # Dim: 2 x 1
+ np.array([[30, 60, 5, 5], [26, 26, 26, 22]]),
+ 10,
+ (1, 2),
+ np.array([[[4, 6, 0, 0], [2, 3, 3, 2]]]),
+ ),
+ (
+ np.array([26, 26, 26, 22]),
+ np.array([1, 10]),
None,
- np.array([[[1, 0, 0, 0]], [[2, 3, 3, 2]]]), # Dim: 2 x 1 x 4
+ np.array([[1, 0, 0, 0], [2, 3, 3, 2]]),
),
(
np.array([[26, 26, 26, 22]]), # Dim: 1 x 4
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aeppl==0.0.27
aesara==2.5.1
arviz==0.21.0
cachetools==5.5.2
cloudpickle==3.1.1
cons==0.4.6
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
etuples==0.3.9
exceptiongroup==1.2.2
fastprogress==1.0.3
filelock==3.18.0
fonttools==4.56.0
h5netcdf==1.6.1
h5py==3.13.0
iniconfig==2.1.0
kiwisolver==1.4.8
logical-unification==0.4.6
matplotlib==3.10.1
miniKanren==1.0.3
multipledispatch==1.0.0
numpy==2.2.4
packaging==24.2
pandas==2.2.3
pillow==11.1.0
pluggy==1.5.0
-e git+https://github.com/pymc-devs/pymc.git@2db28f0b2c0497124f118d7a27728a729c6ca8e0#egg=pymc
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.15.2
six==1.17.0
tomli==2.2.1
toolz==1.0.0
typing_extensions==4.13.0
tzdata==2025.2
xarray==2025.3.1
xarray-einstats==0.8.0
| name: pymc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aeppl==0.0.27
- aesara==2.5.1
- arviz==0.21.0
- cachetools==5.5.2
- cloudpickle==3.1.1
- cons==0.4.6
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- etuples==0.3.9
- exceptiongroup==1.2.2
- fastprogress==1.0.3
- filelock==3.18.0
- fonttools==4.56.0
- h5netcdf==1.6.1
- h5py==3.13.0
- iniconfig==2.1.0
- kiwisolver==1.4.8
- logical-unification==0.4.6
- matplotlib==3.10.1
- minikanren==1.0.3
- multipledispatch==1.0.0
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- pluggy==1.5.0
- pymc==4.0.0b4
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.15.2
- six==1.17.0
- tomli==2.2.1
- toolz==1.0.0
- typing-extensions==4.13.0
- tzdata==2025.2
- xarray==2025.3.1
- xarray-einstats==0.8.0
prefix: /opt/conda/envs/pymc
| [
"pymc/tests/test_distributions_moments.py::test_rv_size_is_none"
] | [
"pymc/tests/test_distributions_moments.py::test_halfstudentt_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_halfstudentt_moment[1-sigma2-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_halfstudentt_moment[nu3-1-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_beta_binomial_moment[10-1-1-None-5]",
"pymc/tests/test_distributions_moments.py::test_beta_binomial_moment[10-1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_beta_binomial_moment[10-1-beta2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_beta_binomial_moment[10-1-beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_chisquared_moment[nu2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_studentt_moment[mu2-10-sigma2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_studentt_moment[mu3-10-sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_halfcauchy_moment[1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_halfcauchy_moment[beta2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_halfcauchy_moment[beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_pareto_moment[2-1-None-1.4142135623730951]",
"pymc/tests/test_distributions_moments.py::test_pareto_moment[2-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_pareto_moment[alpha2-m2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_pareto_moment[alpha3-m3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_wald_moment[mu4-None-phi4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_binomial_moment[7-0.7-None-5]",
"pymc/tests/test_distributions_moments.py::test_binomial_moment[7-0.3-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_binomial_moment[10-p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_binomial_moment[10-p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_poisson_moment[2.7-None-2]",
"pymc/tests/test_distributions_moments.py::test_poisson_moment[2.3-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_poisson_moment[mu2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_poisson_moment[mu3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_negative_binomial_moment[10-0.7-None-4]",
"pymc/tests/test_distributions_moments.py::test_negative_binomial_moment[10-0.7-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_negative_binomial_moment[n2-p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_negative_binomial_moment[10-p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_poisson_moment[0.9-3.0-None-3]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_poisson_moment[0.8-2.9-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_poisson_moment[0.2-mu2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_poisson_moment[0.2-mu3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_binomial_moment[0.8-7-0.7-None-4]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_binomial_moment[0.8-7-0.3-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_binomial_moment[0.4-25-p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_binomial_moment[0.4-25-p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_geometric_moment[0.5-None-2]",
"pymc/tests/test_distributions_moments.py::test_geometric_moment[0.2-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_geometric_moment[p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_geometric_moment[p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_hyper_geometric_moment[50-10-20-None-4]",
"pymc/tests/test_distributions_moments.py::test_hyper_geometric_moment[50-10-23-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_hyper_geometric_moment[50-10-n2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_hyper_geometric_moment[50-10-n3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_discrete_uniform_moment[1-5-None-3]",
"pymc/tests/test_distributions_moments.py::test_discrete_uniform_moment[1-5-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_discrete_uniform_moment[1-upper2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_discrete_uniform_moment[1-upper3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_discrete_weibull_moment[0.5-0.5-None-0]",
"pymc/tests/test_distributions_moments.py::test_discrete_weibull_moment[0.6-0.1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_discrete_weibull_moment[q2-0.42-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_discrete_weibull_moment[q3-beta3-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_moment[a0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_moment[a1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_moment[a2-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_moment[a3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_logitnormal_moment[1-2-None-0.7310585786300049]",
"pymc/tests/test_distributions_moments.py::test_logitnormal_moment[0-sigma1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_logitnormal_moment[mu2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_logitnormal_moment[1-5-4-expected3]",
"pymc/tests/test_distributions_moments.py::test_logitnormal_moment[mu4-sigma4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_car_moment[mu0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_car_moment[mu1-6-expected1]",
"pymc/tests/test_distributions_moments.py::test_car_moment[mu2-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_car_moment[mu3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_moyal_moment[4.0-sigma1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_moyal_moment[mu2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_moyal_moment[mu3-sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu0-cov0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu1-cov1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu2-cov2-2-expected2]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu4-cov4-None-expected4]",
"pymc/tests/test_distributions_moments.py::test_skewnormal_moment[1.0-mu1-1.0-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_skewnormal_moment[alpha2-1-sigma2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_skewnormal_moment[alpha3-mu3-sigma3-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_skewnormal_moment[alpha4-mu4-sigma4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_asymmetriclaplace_moment[b2-1.0-mu2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_asymmetriclaplace_moment[b3-kappa3-mu3-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_asymmetriclaplace_moment[b4-kappa4-mu4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_matrixnormal_moment[mu0-rowchol0-colchol0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_matrixnormal_moment[mu1-rowchol1-colchol1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_matrixnormal_moment[mu2-rowchol2-colchol2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_stickbreakingweights_moment[3-11-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_stickbreakingweights_moment[5-19-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_stickbreakingweights_moment[1-7-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_stickbreakingweights_moment[0.5-5-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_polyagamma_moment[1.0-0.0-None-0.25]",
"pymc/tests/test_distributions_moments.py::test_polyagamma_moment[1.0-z1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_polyagamma_moment[h2-z2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_polyagamma_moment[h3-z3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p0-1-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p1-2-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p2-10-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p3-10-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p4-n4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p5-n5-None-expected5]",
"pymc/tests/test_distributions_moments.py::test_multinomial_moment[p6-n6-size6-expected6]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_negative_binomial_moment[0.2-10-3-None-2]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_negative_binomial_moment[0.2-10-4-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_negative_binomial_moment[0.4-mu2-alpha2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_zero_inflated_negative_binomial_moment[psi3-mu3-alpha3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_kronecker_normal_moment[mu0-covs0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_kronecker_normal_moment[mu1-covs1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_kronecker_normal_moment[mu2-covs2-6-expected2]",
"pymc/tests/test_distributions_moments.py::test_kronecker_normal_moment[mu3-covs3-6-expected3]",
"pymc/tests/test_distributions_moments.py::test_kronecker_normal_moment[mu4-covs4-2-expected4]",
"pymc/tests/test_distributions_moments.py::test_lkjcholeskycov_moment[3-1-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_lkjcholeskycov_moment[4-1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_lkjcholeskycov_moment[3-1-1-expected2]",
"pymc/tests/test_distributions_moments.py::test_lkjcholeskycov_moment[4-1-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a0-1-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a1-2-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a2-10-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a3-10-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a4-n4-None-expected4]",
"pymc/tests/test_distributions_moments.py::test_dirichlet_multinomial_moment[a5-n5-size5-expected5]"
] | [
"pymc/tests/test_distributions_moments.py::test_all_distributions_have_moments",
"pymc/tests/test_distributions_moments.py::test_flat_moment[None-0]",
"pymc/tests/test_distributions_moments.py::test_flat_moment[5-expected1]",
"pymc/tests/test_distributions_moments.py::test_flat_moment[size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_halfflat_moment[None-1]",
"pymc/tests/test_distributions_moments.py::test_halfflat_moment[5-expected1]",
"pymc/tests/test_distributions_moments.py::test_halfflat_moment[size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_uniform_moment[-1-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_uniform_moment[-1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_uniform_moment[0-upper2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_uniform_moment[0-upper3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_normal_moment[0-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_normal_moment[0-sigma1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_normal_moment[mu2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_normal_moment[mu3-sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_halfnormal_moment[1-None-1]",
"pymc/tests/test_distributions_moments.py::test_halfnormal_moment[1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_halfnormal_moment[sigma2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_halfnormal_moment[sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_halfstudentt_moment[1-1-None-1]",
"pymc/tests/test_distributions_moments.py::test_truncatednormal_moment[0.9-1--5-5-None-0]",
"pymc/tests/test_distributions_moments.py::test_truncatednormal_moment[1-sigma1--10-inf-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_truncatednormal_moment[mu2-1-None-10-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_truncatednormal_moment[1-1-lower3-10-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_bernoulli_moment[0.3-None-0]",
"pymc/tests/test_distributions_moments.py::test_bernoulli_moment[0.9-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_bernoulli_moment[p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_bernoulli_moment[p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_beta_moment[1-1-None-0.5]",
"pymc/tests/test_distributions_moments.py::test_beta_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_beta_moment[1-beta2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_beta_moment[1-beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_chisquared_moment[1-None-1]",
"pymc/tests/test_distributions_moments.py::test_chisquared_moment[1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_exponential_moment[2-None-0.5]",
"pymc/tests/test_distributions_moments.py::test_exponential_moment[2-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_exponential_moment[lam2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_exponential_moment[lam3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_laplace_moment[0-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_laplace_moment[0-b1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_laplace_moment[mu2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_laplace_moment[mu3-b3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_studentt_moment[0-1-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_studentt_moment[0-nu1-1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_cauchy_moment[0-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_cauchy_moment[0-beta1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_cauchy_moment[alpha2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_cauchy_moment[alpha3-beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_kumaraswamy_moment[1-1-None-0.5]",
"pymc/tests/test_distributions_moments.py::test_kumaraswamy_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_kumaraswamy_moment[1-b2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_kumaraswamy_moment[a3-1-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_kumaraswamy_moment[1-b4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_lognormal_moment[0-1-None-1.6487212707001282]",
"pymc/tests/test_distributions_moments.py::test_lognormal_moment[0-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_lognormal_moment[mu2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_lognormal_moment[mu3-sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_halfcauchy_moment[1-None-1]",
"pymc/tests/test_distributions_moments.py::test_gamma_moment[1-1-None-1]",
"pymc/tests/test_distributions_moments.py::test_gamma_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_gamma_moment[alpha2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_gamma_moment[alpha3-beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_inverse_gamma_moment[5-1-None-0.25]",
"pymc/tests/test_distributions_moments.py::test_inverse_gamma_moment[0.5-1-None-0.6666666666666666]",
"pymc/tests/test_distributions_moments.py::test_inverse_gamma_moment[5-1-5-expected2]",
"pymc/tests/test_distributions_moments.py::test_inverse_gamma_moment[alpha3-1-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_vonmises_moment[0-1-None-0]",
"pymc/tests/test_distributions_moments.py::test_vonmises_moment[0-kappa1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_vonmises_moment[mu2-0.5-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_vonmises_moment[mu3-kappa3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_wald_moment[2-None-None-None-2]",
"pymc/tests/test_distributions_moments.py::test_wald_moment[None-1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_wald_moment[1-None-phi2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_wald_moment[3-lam3-None-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_weibull_moment[1-1-None-1]",
"pymc/tests/test_distributions_moments.py::test_weibull_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_weibull_moment[alpha2-1-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_weibull_moment[alpha3-beta3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_constant_moment[1-None-1]",
"pymc/tests/test_distributions_moments.py::test_constant_moment[1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_constant_moment[c2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_logistic_moment[1-1-None-1]",
"pymc/tests/test_distributions_moments.py::test_logistic_moment[1-1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_logistic_moment[2-s2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_logistic_moment[mu3-s3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_exgaussian_moment[1-1-1-None-2]",
"pymc/tests/test_distributions_moments.py::test_exgaussian_moment[1-1-sigma1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_exgaussian_moment[1-1-3-5-expected2]",
"pymc/tests/test_distributions_moments.py::test_exgaussian_moment[1-nu3-5-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_exgaussian_moment[1-nu4-1-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_gumbel_moment[0-2-None-1.1544313298030657]",
"pymc/tests/test_distributions_moments.py::test_gumbel_moment[1-beta1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_gumbel_moment[mu2-2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_gumbel_moment[1-2-5-expected3]",
"pymc/tests/test_distributions_moments.py::test_gumbel_moment[mu4-beta4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_triangular_moment[1-0-5-None-2]",
"pymc/tests/test_distributions_moments.py::test_triangular_moment[3-lower1-upper1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_triangular_moment[c2--3-3-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_triangular_moment[3--3-6-5-expected3]",
"pymc/tests/test_distributions_moments.py::test_triangular_moment[c4-lower4-upper4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_categorical_moment[p0-None-2]",
"pymc/tests/test_distributions_moments.py::test_categorical_moment[p1-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_categorical_moment[p2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_categorical_moment[p3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_interpolated_moment[x_points0-pdf_points0-None-0.2]",
"pymc/tests/test_distributions_moments.py::test_interpolated_moment[x_points1-pdf_points1-None-1.5458937198067635]",
"pymc/tests/test_distributions_moments.py::test_interpolated_moment[x_points2-pdf_points2-size2-expected2]",
"pymc/tests/test_distributions_moments.py::test_interpolated_moment[x_points3-pdf_points3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu0-cov0-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu1-cov1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu2-cov2-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu3-cov3-None-expected3]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu4-cov4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu5-cov5-5-expected5]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu6-cov6-size6-expected6]",
"pymc/tests/test_distributions_moments.py::test_mv_normal_moment[mu7-cov7-size7-expected7]",
"pymc/tests/test_distributions_moments.py::test_moyal_moment[4.0-3.0-None-7.8110885363844345]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu3-cov3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu5-cov5-size5-expected5]",
"pymc/tests/test_distributions_moments.py::test_mvstudentt_moment[2-mu6-cov6-size6-expected6]",
"pymc/tests/test_distributions_moments.py::test_skewnormal_moment[1.0-1.0-1.0-None-1.56418958]",
"pymc/tests/test_distributions_moments.py::test_asymmetriclaplace_moment[1.0-1.0-1.0-None-1.0]",
"pymc/tests/test_distributions_moments.py::test_asymmetriclaplace_moment[1.0-kappa1-1.0-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_matrixnormal_moment[mu3-rowchol3-colchol3-2-expected3]",
"pymc/tests/test_distributions_moments.py::test_matrixnormal_moment[mu4-rowchol4-colchol4-size4-expected4]",
"pymc/tests/test_distributions_moments.py::test_rice_moment[1.0-1.0-None-1.5485724605511453]",
"pymc/tests/test_distributions_moments.py::test_rice_moment[1.0-sigma1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_rice_moment[nu2-1.0-None-expected2]",
"pymc/tests/test_distributions_moments.py::test_rice_moment[nu3-sigma3-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_univariate[None-None-0.0]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_univariate[None-5-expected1]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_univariate[custom_moment-None-5]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_univariate[custom_moment-size3-expected3]",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_univariate[()]",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_univariate[(2,)]",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_univariate[(3,",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_multivariate[()]",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_multivariate[(2,)]",
"pymc/tests/test_distributions_moments.py::test_density_dist_custom_moment_multivariate[(3,",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_multivariate[True-size0]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_multivariate[True-size1]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_multivariate[True-size2]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_multivariate[False-size3]",
"pymc/tests/test_distributions_moments.py::test_density_dist_default_moment_multivariate[False-size4]",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[None-1-0]",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[None-1-[0",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[None-[1",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[3-1-0]",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[3-1-[0",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[3-[1",
"pymc/tests/test_distributions_moments.py::test_simulator_moment[(5,",
"pymc/tests/test_distributions_moments.py::test_lkjcorr_moment[3-1-None-expected0]",
"pymc/tests/test_distributions_moments.py::test_lkjcorr_moment[5-1-None-expected1]",
"pymc/tests/test_distributions_moments.py::test_lkjcorr_moment[3-1-1-expected2]",
"pymc/tests/test_distributions_moments.py::test_lkjcorr_moment[5-1-size3-expected3]"
] | [] | Apache License 2.0 | 12,228 | 830 | [
"pymc/distributions/multivariate.py",
"pymc/distributions/shape_utils.py"
] |
qiboteam__qibo-550 | 790fd4f45587f892a30c266bd6c87266a1926c39 | 2022-02-15 16:10:22 | 0209995beed0a681c0e0551ea7dc60e521116418 | diff --git a/setup.py b/setup.py
index 3332da87f..7a1aaa439 100644
--- a/setup.py
+++ b/setup.py
@@ -60,6 +60,7 @@ setup(
"psutil",
"pyyaml",
"importlib_metadata",
+ "tabulate"
],
extras_require={
"docs": ["sphinx", "sphinx_rtd_theme", "recommonmark", "sphinxcontrib-bibtex", "sphinx_markdown_tables", "nbsphinx", "IPython"],
diff --git a/src/qibo/abstractions/circuit.py b/src/qibo/abstractions/circuit.py
index bee382e2d..3029054f3 100644
--- a/src/qibo/abstractions/circuit.py
+++ b/src/qibo/abstractions/circuit.py
@@ -937,12 +937,14 @@ class AbstractCircuit(ABC):
return len(qubits), gate_list
- def draw(self, line_wrap=70) -> str:
+ def draw(self, line_wrap=70, legend=False) -> str:
"""Draw text circuit using unicode symbols.
Args:
line_wrap (int): maximum number of characters per line. This option
split the circuit text diagram in chunks of line_wrap characters.
+ legend (bool): If ``True`` prints a legend below the circuit for
+ callbacks and channels. Default is ``False``.
Return:
String containing text circuit diagram.
@@ -955,7 +957,12 @@ class AbstractCircuit(ABC):
"crx": "RX", "cry": "RY", "crz": "RZ",
"cu1": "U1", "cu3": "U3", "ccx": "X",
"id": "I", "measure": "M", "fsim": "f",
- "generalizedfsim": "gf", "Unitary": "U", "fswap":"fx"}
+ "generalizedfsim": "gf", "Unitary": "U", "fswap":"fx",
+ "PauliNoiseChannel": "PN", "KrausChannel": "K",
+ "UnitaryChannel": "U", "ThermalRelaxationChannel": "TR",
+ "ResetChannel": "R", "PartialTrace": "PT",
+ "EntanglementEntropy": "EE", "Norm": "N",
+ "Overlap": "O", "Energy": "E"}
# build string representation of gates
matrix = [[] for _ in range(self.nqubits)]
@@ -965,7 +972,10 @@ class AbstractCircuit(ABC):
if gate.name not in labels:
raise_error(NotImplementedError, f"{gate.name} gate is not supported by `circuit.draw`")
gate_name = labels.get(gate.name)
- targets = list(gate.target_qubits)
+ if isinstance(gate, gates.CallbackGate):
+ targets = list(range(self.nqubits))
+ else:
+ targets = list(gate.target_qubits)
controls = list(gate.control_qubits)
# identify boundaries
@@ -1013,6 +1023,17 @@ class AbstractCircuit(ABC):
output += f'q{q}' + ' ' * (len(str(self.nqubits))-len(str(q))) + \
': ─' + ''.join(matrix[q]) + '\n'
+ # legend
+ if legend:
+ from tabulate import tabulate
+ names = [i.name for i in self.queue \
+ if isinstance(i,gates.CallbackGate) or "Channel" in i.name]
+ names = list(dict.fromkeys(names))
+ table = tabulate([[i, labels[i]] for i in names],
+ headers=['Gate', 'Symbol'],
+ tablefmt='orgtbl')
+ table = '\n Legend for callbacks and channels: \n' + table
+
# line wrap
if line_wrap:
loutput = output.splitlines()
@@ -1040,4 +1061,7 @@ class AbstractCircuit(ABC):
if loutput is not None:
output = ''.join(loutput)
+ if legend:
+ output += table
+
return output.rstrip('\n')
| Draw Noise Channel Gates
**Is your feature request related to a problem? Please describe.**
I have implemented a customised noise model for my quantum circuit. However, I could not use the built-in .draw() function to visualise my quantum circuit. It would be difficult to check whether I have placed my quantum channels correctly.
> ~/miniconda3/envs/mypsi4/lib//python3.8/site-packages/qibo/abstractions/circuit.py in draw(self, line_wrap)
>953 for gate in self.queue:
>954 if gate.name not in labels:
>--> 955 raise_error(NotImplementedError, f"{gate.name} gate is not supported by `circuit.draw`")
>956 gate_name = labels.get(gate.name)
>957 targets = list(gate.target_qubits)
>
>~/miniconda3/envs/mypsi4/lib//python3.8/site-packages/qibo/config.py in raise_error(exception, message, args)
>50 raise exception(message, args)
>51 else:
>---> 52 raise exception(message)
>53
>54
>
> NotImplementedError: PauliNoiseChannel gate is not supported by `circuit.draw`
**Describe the solution you'd like**
Draw a quantum circuit with simple special symbols that represent the single-qubit noise channel gates (Thermal, Pauli, etc.) so that gate placement can be checked quickly.
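As a rough sketch (not part of the original report) of how the `legend` option added in the patch above could be used — the channel parameters simply mirror the ones in the test file below:
```python
from qibo import gates
from qibo.models import Circuit

c = Circuit(2, density_matrix=True)
c.add(gates.H(0))
c.add(gates.PauliNoiseChannel(0, 0.1, 0.0, 0.2))   # drawn as "PN"
c.add(gates.CNOT(0, 1))
c.add(gates.PauliNoiseChannel(1, 0.0, 0.2, 0.1))

# legend=True appends a table mapping channel/callback names to their symbols.
print(c.draw(legend=True))
```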
**Describe alternatives you've considered**
I have placed all the gates in a list before adding them to the quantum circuit one by one. By printing out the gate type and qubit index of each gate, one can 'visualise' the quantum circuit with great difficulty.
**Additional context**
None | qiboteam/qibo | diff --git a/src/qibo/tests/test_abstract_circuit.py b/src/qibo/tests/test_abstract_circuit.py
index dfa3ba1ef..05c6b567b 100644
--- a/src/qibo/tests/test_abstract_circuit.py
+++ b/src/qibo/tests/test_abstract_circuit.py
@@ -549,3 +549,49 @@ def test_circuit_draw_not_supported_gates():
c.add(gates.Flatten(1))
with pytest.raises(NotImplementedError):
c.draw()
[email protected]("legend", [True, False])
+def test_circuit_draw_channels(legend):
+ """Check that channels are drawn correctly"""
+ from qibo.models import Circuit as circuit
+ c = circuit(2, density_matrix=True)
+ c.add(gates.H(0))
+ c.add(gates.PauliNoiseChannel(0, 0.1, 0.0, 0.2))
+ c.add(gates.H(1))
+ c.add(gates.PauliNoiseChannel(1, 0.0, 0.2, 0.1))
+ c.add(gates.CNOT(0, 1))
+ c.add(gates.PauliNoiseChannel(0, 0.1, 0.0, 0.2))
+ c.add(gates.PauliNoiseChannel(1, 0.0, 0.2, 0.1))
+
+ ref = 'q0: ─H─PN─o─PN─\n' \
+ 'q1: ─H─PN─X─PN─'
+
+ if legend:
+ ref += '\n\n Legend for callbacks and channels: \n' \
+ '| Gate | Symbol |\n' \
+ '|-------------------+----------|\n' \
+ '| PauliNoiseChannel | PN |'
+
+ assert c.draw(legend=legend) == ref
+
[email protected]("legend", [True, False])
+def test_circuit_draw_callbacks(legend):
+ """Check that callbacks are drawn correcly"""
+ from qibo.callbacks import EntanglementEntropy
+ entropy = EntanglementEntropy([0])
+ c = Circuit(2)
+ c.add(gates.CallbackGate(entropy))
+ c.add(gates.H(0))
+ c.add(gates.CallbackGate(entropy))
+ c.add(gates.CNOT(0, 1))
+ c.add(gates.CallbackGate(entropy))
+
+ ref = 'q0: ─EE─H─EE─o─EE─\n' \
+ 'q1: ─EE───EE─X─EE─'
+
+ if legend:
+ ref += '\n\n Legend for callbacks and channels: \n' \
+ '| Gate | Symbol |\n'\
+ '|---------------------+----------|\n'\
+ '| EntanglementEntropy | EE |'
+
+ assert c.draw(legend=legend) == ref
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
cma==4.0.0
cycler==0.11.0
exceptiongroup==1.2.2
fonttools==4.38.0
importlib-metadata==6.7.0
iniconfig==2.0.0
joblib==1.3.2
kiwisolver==1.4.5
matplotlib==3.5.3
mpmath==1.3.0
numpy==1.21.6
packaging==24.0
Pillow==9.5.0
pluggy==1.2.0
psutil==7.0.0
pyparsing==3.1.4
pytest==7.4.4
python-dateutil==2.9.0.post0
PyYAML==6.0.1
-e git+https://github.com/qiboteam/qibo.git@790fd4f45587f892a30c266bd6c87266a1926c39#egg=qibo
scipy==1.7.3
six==1.17.0
sympy==1.10.1
tomli==2.0.1
typing_extensions==4.7.1
zipp==3.15.0
| name: qibo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cma==4.0.0
- cycler==0.11.0
- exceptiongroup==1.2.2
- fonttools==4.38.0
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- joblib==1.3.2
- kiwisolver==1.4.5
- matplotlib==3.5.3
- mpmath==1.3.0
- numpy==1.21.6
- packaging==24.0
- pillow==9.5.0
- pluggy==1.2.0
- psutil==7.0.0
- pyparsing==3.1.4
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pyyaml==6.0.1
- scipy==1.7.3
- six==1.17.0
- sympy==1.10.1
- tomli==2.0.1
- typing-extensions==4.7.1
- zipp==3.15.0
prefix: /opt/conda/envs/qibo
| [
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_channels[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_callbacks[False]"
] | [
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_channels[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_callbacks[True]"
] | [
"src/qibo/tests/test_abstract_circuit.py::test_parametrizedgates_class",
"src/qibo/tests/test_abstract_circuit.py::test_queue_class",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_init",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_init_errors[0]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_init_errors[-10]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_init_errors[2.5]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_add",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_add_errors",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_add_iterable",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_add_generator",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_add_nested_generator",
"src/qibo/tests/test_abstract_circuit.py::test_set_nqubits",
"src/qibo/tests/test_abstract_circuit.py::test_add_measurement",
"src/qibo/tests/test_abstract_circuit.py::test_gate_types",
"src/qibo/tests/test_abstract_circuit.py::test_gates_of_type",
"src/qibo/tests/test_abstract_circuit.py::test_summary",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_addition[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_addition[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_addition_errors",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_on_qubits",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_on_qubits_errors",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_copy[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_copy[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_copy_with_measurements",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_invert[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_invert[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_decompose[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_decompose[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_with_noise[noise_map0-False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_with_noise[noise_map0-True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_with_noise[noise_map1-False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_with_noise[noise_map1-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[list-True-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[list-True-False]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[list-False-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[list-False-False]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[dict-True-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[dict-True-False]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[dict-False-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[dict-False-False]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[flatlist-True-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[flatlist-True-False]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[flatlist-False-True]",
"src/qibo/tests/test_abstract_circuit.py::test_get_parameters[flatlist-False-False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_set_parameters_with_list[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_set_parameters_with_list[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_set_parameters_with_dictionary[True]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_set_parameters_with_dictionary[False]",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_set_parameters_errors",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_line_wrap",
"src/qibo/tests/test_abstract_circuit.py::test_circuit_draw_not_supported_gates"
] | [] | Apache License 2.0 | 12,229 | 986 | [
"setup.py",
"src/qibo/abstractions/circuit.py"
] |
|
python-visualization__folium-1570 | 67aab11039cd990d73fdf14566380286835ff84b | 2022-02-15 18:12:03 | 67aab11039cd990d73fdf14566380286835ff84b | beautah: I chased down all the failed checks and the remaining seem to be not related to any changes I've made. Happy to fix anything else up if needed though | diff --git a/folium/plugins/float_image.py b/folium/plugins/float_image.py
index 00f32e03..fd3fc21f 100644
--- a/folium/plugins/float_image.py
+++ b/folium/plugins/float_image.py
@@ -12,6 +12,7 @@ class FloatImage(MacroElement):
position:absolute;
bottom:{{this.bottom}}%;
left:{{this.left}}%;
+ width:{{this.width}}%;
}
</style>
{% endmacro %}
@@ -24,9 +25,10 @@ class FloatImage(MacroElement):
{% endmacro %}
""")
- def __init__(self, image, bottom=75, left=75):
+ def __init__(self, image, bottom=75, left=75, width=100):
super(FloatImage, self).__init__()
self._name = 'FloatImage'
self.image = image
self.bottom = bottom
self.left = left
+ self.width = width
| Styling Imported Images
New to Python, kinda dove into it headfirst to create an interactive map with data. Really enjoying it, and really loving what Folium can do.
I've added a legend.png to create a legend (temporarily, or perhaps permanently, unless something else can be recommended) that I'd like to style by adding a box-shadow as well as adding a radius to curve the edges if I so desire.
Perhaps it's already in the notes somewhere, but I couldn't find it!
The legend itself works as it should and doesn't interfere with interacting with each site bubble (each site is expected to have more than just a "name" at some point. Actual data/graphs/etc. will be contained for nearly each site.)
```python
# Create Map Legend
from folium.plugins import FloatImage
image_file='images/Legend.png'
FloatImage(image_file,bottom=5,left=5).add_to(map)
```
Another quick question: can the output HTML file be modified with a viewport tag to assist with scaling on a mobile environment? I haven't tried it yet, and I assume each time I compile the app after I make changes the subsequent HTML file is entirely overwritten.
Thank you!
| python-visualization/folium | diff --git a/tests/plugins/test_float_image.py b/tests/plugins/test_float_image.py
index 8fdf02f4..cf98028e 100644
--- a/tests/plugins/test_float_image.py
+++ b/tests/plugins/test_float_image.py
@@ -13,7 +13,7 @@ from jinja2 import Template
def test_float_image():
m = folium.Map([45., 3.], zoom_start=4)
url = 'https://raw.githubusercontent.com/SECOORA/static_assets/master/maps/img/rose.png'
- szt = plugins.FloatImage(url, bottom=60, left=70)
+ szt = plugins.FloatImage(url, bottom=60, left=70, width=20)
m.add_child(szt)
m._repr_html_()
@@ -35,6 +35,7 @@ def test_float_image():
position:absolute;
bottom:60%;
left:70%;
+ width:20%;
}
</style>
""")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | branca==0.8.1
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
-e git+https://github.com/python-visualization/folium.git@67aab11039cd990d73fdf14566380286835ff84b#egg=folium
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: folium
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- branca==0.8.1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- folium==0.12.1.dev50+g67aab110
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/folium
| [
"tests/plugins/test_float_image.py::test_float_image"
] | [] | [] | [] | MIT License | 12,230 | 246 | [
"folium/plugins/float_image.py"
] |
dask__distributed-5822 | 9a266a049905004241701be0fde54a7866912267 | 2022-02-16 16:19:27 | 2d3fddc14d1e06fd06b9f0f4c3256f254f3c670e | github-actions[bot]: ## Unit Test Results
15 files + 15 15 suites +15 8h 33m 55s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "duration of all tests") + 8h 33m 55s
2 603 tests + 2 603 2 525 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests") + 2 525 78 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests") + 78 0 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests") ±0
19 521 runs +19 521 18 194 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests") +18 194 1 327 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests") +1 327 0 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests") ±0
Results for commit 02941db2. ± Comparison against base commit 8d0df89b.
fjetter: Your file formatting is a bit off. I would suggest installing pre-commit; see http://distributed.dask.org/en/stable/develop.html#code-formatting
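The patch below makes `__version__` and `__git_revision__` lazy through a module-level `__getattr__` (PEP 562), so merely importing the package no longer shells out to `git`. As a rough standalone sketch of that pattern (the package name here is a placeholder, not part of the patch):

```python
# mypackage/__init__.py  -- "mypackage" is an illustrative name
def __getattr__(name):
    # Only called when normal attribute lookup fails (PEP 562), so the
    # potentially expensive lookup runs on first access, not at import time.
    if name == "__version__":
        from importlib.metadata import version
        return version("mypackage")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```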
| diff --git a/distributed/__init__.py b/distributed/__init__.py
index e44a119c..d33514ab 100644
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -1,10 +1,12 @@
from . import config # isort:skip; load distributed configuration first
from . import widgets # isort:skip; load distributed widgets second
+
+
import dask
from dask.config import config # type: ignore
from ._version import get_versions
-from .actor import Actor, BaseActorFuture
+from .actor import Actor, ActorFuture, BaseActorFuture
from .client import (
Client,
CompatibleExecutor,
@@ -46,7 +48,20 @@ from .variable import Variable
from .worker import Reschedule, Worker, get_client, get_worker, print, secede, warn
from .worker_client import local_client, worker_client
-versions = get_versions()
-__version__ = versions["version"]
-__git_revision__ = versions["full-revisionid"]
-del get_versions, versions
+
+def __getattr__(name):
+ global __version__, __git_revision__
+
+ if name == "__version__":
+ from importlib.metadata import version
+
+ __version__ = version("distributed")
+ return __version__
+
+ if name == "__git_revision__":
+ from ._version import get_versions
+
+ __git_revision__ = get_versions()["full-revisionid"]
+ return __git_revision__
+
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
| importing distributed runs 4 `git` subprocesses in CI (when installed with -e)
I noticed that tests that run a dask subprocess are often flaky on CI (especially so on low-performance macOS runners)
https://github.com/dask/distributed/runs/4922796526?check_suite_focus=true#step:12:1849
This is an example of a process taking more than 5 seconds to boot on a mac in `test_dask_worker::test_memory_limit`:
```pytb
Traceback (most recent call last):
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 33, in <module>
sys.exit(load_entry_point('distributed', 'console_scripts', 'dask-worker')())
File "/Users/runner/miniconda3/envs/dask-distributed/bin/dask-worker", line 25, in importlib_load_entry_point
return next(matches).load()
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 972, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/Users/runner/work/distributed/distributed/distributed/__init__.py", line 49, in <module>
versions = get_versions()
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 534, in get_versions
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 265, in git_pieces_from_vcs
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
File "/Users/runner/work/distributed/distributed/distributed/_version.py", line 78, in run_command
p = subprocess.Popen(
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 951, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/Users/runner/miniconda3/envs/dask-distributed/lib/python3.9/subprocess.py", line 1777, in _execute_child
part = os.read(errpipe_read, 50000)
KeyboardInterrupt
``` | dask/distributed | diff --git a/distributed/tests/test_init.py b/distributed/tests/test_init.py
new file mode 100644
index 00000000..c8e3f986
--- /dev/null
+++ b/distributed/tests/test_init.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+import importlib.metadata
+
+import distributed
+
+
+def test_version() -> None:
+ assert distributed.__version__ == importlib.metadata.version("distributed")
+
+
+def test_git_revision() -> None:
+ assert isinstance(distributed.__git_revision__, str)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 2022.02 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
cloudpickle==3.1.1
coverage==7.8.0
dask==2022.2.0
-e git+https://github.com/dask/distributed.git@9a266a049905004241701be0fde54a7866912267#egg=distributed
exceptiongroup==1.2.2
execnet==2.1.1
fsspec==2025.3.1
iniconfig==2.1.0
Jinja2==3.1.6
locket==1.0.0
MarkupSafe==3.0.2
msgpack==1.1.0
packaging==24.2
partd==1.4.2
pluggy==1.5.0
psutil==7.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
PyYAML==6.0.2
sortedcontainers==2.4.0
tblib==3.1.0
tomli==2.2.1
toolz==1.0.0
tornado==6.4.2
typing_extensions==4.13.0
zict==3.0.0
| name: distributed
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- cloudpickle==3.1.1
- coverage==7.8.0
- dask==2022.2.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- fsspec==2025.3.1
- iniconfig==2.1.0
- jinja2==3.1.6
- locket==1.0.0
- markupsafe==3.0.2
- msgpack==1.1.0
- packaging==24.2
- partd==1.4.2
- pluggy==1.5.0
- psutil==7.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- sortedcontainers==2.4.0
- tblib==3.1.0
- tomli==2.2.1
- toolz==1.0.0
- tornado==6.4.2
- typing-extensions==4.13.0
- zict==3.0.0
prefix: /opt/conda/envs/distributed
| [
"distributed/tests/test_init.py::test_version"
] | [] | [
"distributed/tests/test_init.py::test_git_revision"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,239 | 370 | [
"distributed/__init__.py"
] |
frictionlessdata__frictionless-py-998 | 164f611b3a1454893e35c4c70101c2678728818d | 2022-02-17 08:33:43 | 164f611b3a1454893e35c4c70101c2678728818d | AntoineAugusti: Not sure what I can do in regards to the ` project-assign` CI, let me know
AntoineAugusti: @roll Thanks, I've done the suggested updates | diff --git a/frictionless/field.py b/frictionless/field.py
index 2d812d86..085df79d 100644
--- a/frictionless/field.py
+++ b/frictionless/field.py
@@ -58,6 +58,7 @@ class Field(Metadata):
float_number=None,
decimal_char=None,
group_char=None,
+ example=None,
# Extra
schema=None,
):
@@ -77,6 +78,7 @@ class Field(Metadata):
self.setinitial("decimalChar", decimal_char)
self.setinitial("groupChar", group_char)
self.setinitial("rdfType", rdf_type)
+ self.setinitial("example", example)
self.__schema = schema
self.__type = None
super().__init__(descriptor)
@@ -286,6 +288,14 @@ class Field(Metadata):
"""
return self.get("groupChar", settings.DEFAULT_GROUP_CHAR)
+ @Metadata.property
+ def example(self):
+ """
+ Returns:
+ any: example value
+ """
+ return self.get("example", None)
+
# Expand
def expand(self):
diff --git a/frictionless/schema.py b/frictionless/schema.py
index 89bda78a..d409a476 100644
--- a/frictionless/schema.py
+++ b/frictionless/schema.py
@@ -302,6 +302,13 @@ class Schema(Metadata):
if field.builtin:
yield from field.metadata_errors
+ # Examples
+ for field in [f for f in self.fields if "example" in field]:
+ _, notes = field.read_cell(field.example)
+ if notes is not None:
+ note = 'example value for field "%s" is not valid' % field.name
+ yield errors.SchemaError(note=note)
+
# Primary Key
for name in self.primary_key:
if name not in self.field_names:
| Validate fields[].example for Schema
# Overview
Related to https://github.com/frictionlessdata/specs/pull/753
If a field has an `example` key, make sure that the `example` is valid according to the field's type and constraints.
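A short usage sketch of the intended behaviour, mirroring the tests added further down (descriptor values are illustrative):

```python
from frictionless import Schema

# An `example` that violates the field's own type/constraints should
# surface a metadata error rather than pass silently.
schema = Schema({
    "fields": [
        {"name": "age", "type": "integer", "example": "not-a-number"},
    ]
})
print([error["note"] for error in schema.metadata_errors])
# expected: ['example value for field "age" is not valid']
```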
| frictionlessdata/frictionless-py | diff --git a/tests/test_field.py b/tests/test_field.py
index 946f42bf..ba8389bf 100644
--- a/tests/test_field.py
+++ b/tests/test_field.py
@@ -321,6 +321,12 @@ def test_field_read_cell_multiple_constraints():
assert read("") == (None, None)
[email protected]("example_value", [(None), (42), ("foo")])
+def test_field_with_example_set(example_value):
+ field = Field({"name": "name", "type": "string", "example": example_value})
+ assert field.example == example_value
+
+
# Import/Export
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 9d527d98..9b9e7f09 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -255,6 +255,37 @@ def test_schema_metadata_error_message():
assert "is not valid under any of the given schema" in note
+def test_schema_valid_examples():
+ schema = Schema(
+ {
+ "fields": [
+ {"name": "name", "type": "string", "example": "John"},
+ {"name": "age", "type": "integer", "example": 42},
+ ]
+ }
+ )
+ assert schema.get_field("name").example == "John"
+ assert len(schema.metadata_errors) == 0
+
+
+def test_schema_invalid_example():
+ schema = Schema(
+ {
+ "fields": [
+ {
+ "name": "name",
+ "type": "string",
+ "example": None,
+ "constraints": {"required": True},
+ }
+ ]
+ }
+ )
+ note = schema.metadata_errors[0]["note"]
+ assert len(schema.metadata_errors) == 1
+ assert 'example value for field "name" is not valid' == note
+
+
@pytest.mark.parametrize("create_descriptor", [(False,), (True,)])
def test_schema_standard_specs_properties(create_descriptor):
options = dict(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 4.25 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install --upgrade -e .[bigquery,ckan,excel,gsheets,html,json,ods,pandas,s3,server,spss,sql,dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"apt-get install -y postgresql libpq-dev"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
attrs==25.3.0
black==23.12.1
blinker==1.9.0
boto3==1.37.23
botocore==1.37.23
cached-property==2.0.1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
ckanapi==4.8
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
databind==4.5.2
databind.core==4.5.2
databind.json==4.5.2
decorator==5.2.1
deepmerge==2.0
Deprecated==1.2.18
docopt==0.6.2
docspec==2.2.1
docspec-python==2.2.1
docstring_parser==0.11
et_xmlfile==2.0.0
exceptiongroup==1.2.2
execnet==2.1.1
executing==2.2.0
ezodf==0.3.2
Flask==3.1.0
-e git+https://github.com/frictionlessdata/frictionless-py.git@164f611b3a1454893e35c4c70101c2678728818d#egg=frictionless
gitdb==4.0.12
GitPython==3.1.44
giturlparse==0.12.0
google-api-core==2.24.2
google-api-python-client==2.166.0
google-auth==2.38.0
google-auth-httplib2==0.2.0
google-auth-oauthlib==1.2.1
googleapis-common-protos==1.69.2
greenlet==3.1.1
gunicorn==23.0.0
httplib2==0.22.0
idna==3.10
ijson==3.3.0
importlib_metadata==8.6.1
iniconfig==2.1.0
ipython==8.18.1
isodate==0.7.2
itsdangerous==2.2.0
jedi==0.19.2
Jinja2==3.1.6
jmespath==1.0.1
jsonlines==4.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
livemark==0.110.8
livereload==2.7.1
lxml==5.3.1
markdown-it-py==3.0.0
marko==1.3.1
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mccabe==0.7.0
mdurl==0.1.2
moto==5.1.2
multidict==6.2.0
mypy==1.15.0
mypy-extensions==1.0.0
nr-date==2.1.0
nr-stream==1.1.5
nr.util==0.8.12
numpy==2.0.2
oauth2client==4.1.3
oauthlib==3.2.2
openpyxl==3.1.5
packaging==24.2
pandas==2.2.3
parso==0.8.4
pathspec==0.12.1
petl==1.7.15
pexpect==4.9.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
propcache==0.3.1
proto-plus==1.26.1
protobuf==6.30.2
psycopg2==2.9.10
ptyprocess==0.7.0
pure_eval==0.2.3
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycodestyle==2.13.0
pycparser==2.22
pydoc-markdown==4.8.2
pydocstyle==6.3.0
pyflakes==3.3.2
Pygments==2.19.1
pygsheets==2.0.6
pylama==8.4.1
PyMySQL==1.1.1
pyparsing==3.2.3
pyquery==1.4.3
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-only==2.1.2
pytest-timeout==2.3.1
pytest-vcr==1.0.2
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-slugify==8.0.4
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
requests==2.32.3
requests-mock==1.12.1
requests-oauthlib==2.0.0
responses==0.25.7
rfc3986==2.0.0
rich==14.0.0
rpds-py==0.24.0
rsa==4.9
s3transfer==0.11.4
savReaderWriter==3.4.2
shellingham==1.5.4
simpleeval==1.0.3
simplejson==3.20.1
six==1.17.0
smmap==5.0.2
snowballstemmer==2.2.0
SQLAlchemy==2.0.40
stack-data==0.6.3
stringcase==1.2.0
text-unidecode==1.3
tomli==2.2.1
tomli_w==1.2.0
tornado==6.4.2
traitlets==5.14.3
typeapi==2.2.4
typer==0.15.2
typing_extensions==4.13.0
tzdata==2025.2
uritemplate==4.1.1
urllib3==1.26.20
validators==0.34.0
vcrpy==7.0.0
watchdog==6.0.0
wcwidth==0.2.13
Werkzeug==3.1.3
wrapt==1.17.2
xlrd==2.0.1
xlwt==1.3.0
xmltodict==0.14.2
yapf==0.43.0
yarl==1.18.3
zipp==3.21.0
| name: frictionless-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- attrs==25.3.0
- black==23.12.1
- blinker==1.9.0
- boto3==1.37.23
- botocore==1.37.23
- cached-property==2.0.1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- ckanapi==4.8
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- databind==4.5.2
- databind-core==4.5.2
- databind-json==4.5.2
- decorator==5.2.1
- deepmerge==2.0
- deprecated==1.2.18
- docopt==0.6.2
- docspec==2.2.1
- docspec-python==2.2.1
- docstring-parser==0.11
- et-xmlfile==2.0.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- executing==2.2.0
- ezodf==0.3.2
- flask==3.1.0
- frictionless==4.25.1
- gitdb==4.0.12
- gitpython==3.1.44
- giturlparse==0.12.0
- google-api-core==2.24.2
- google-api-python-client==2.166.0
- google-auth==2.38.0
- google-auth-httplib2==0.2.0
- google-auth-oauthlib==1.2.1
- googleapis-common-protos==1.69.2
- greenlet==3.1.1
- gunicorn==23.0.0
- httplib2==0.22.0
- idna==3.10
- ijson==3.3.0
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipython==8.18.1
- isodate==0.7.2
- itsdangerous==2.2.0
- jedi==0.19.2
- jinja2==3.1.6
- jmespath==1.0.1
- jsonlines==4.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- livemark==0.110.8
- livereload==2.7.1
- lxml==5.3.1
- markdown-it-py==3.0.0
- marko==1.3.1
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mccabe==0.7.0
- mdurl==0.1.2
- moto==5.1.2
- multidict==6.2.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nr-date==2.1.0
- nr-stream==1.1.5
- nr-util==0.8.12
- numpy==2.0.2
- oauth2client==4.1.3
- oauthlib==3.2.2
- openpyxl==3.1.5
- packaging==24.2
- pandas==2.2.3
- parso==0.8.4
- pathspec==0.12.1
- petl==1.7.15
- pexpect==4.9.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- propcache==0.3.1
- proto-plus==1.26.1
- protobuf==6.30.2
- psycopg2==2.9.10
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycodestyle==2.13.0
- pycparser==2.22
- pydoc-markdown==4.8.2
- pydocstyle==6.3.0
- pyflakes==3.3.2
- pygments==2.19.1
- pygsheets==2.0.6
- pylama==8.4.1
- pymysql==1.1.1
- pyparsing==3.2.3
- pyquery==1.4.3
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-only==2.1.2
- pytest-timeout==2.3.1
- pytest-vcr==1.0.2
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- python-slugify==8.0.4
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- requests==2.32.3
- requests-mock==1.12.1
- requests-oauthlib==2.0.0
- responses==0.25.7
- rfc3986==2.0.0
- rich==14.0.0
- rpds-py==0.24.0
- rsa==4.9
- s3transfer==0.11.4
- savreaderwriter==3.4.2
- shellingham==1.5.4
- simpleeval==1.0.3
- simplejson==3.20.1
- six==1.17.0
- smmap==5.0.2
- snowballstemmer==2.2.0
- sqlalchemy==2.0.40
- stack-data==0.6.3
- stringcase==1.2.0
- text-unidecode==1.3
- tomli==2.2.1
- tomli-w==1.2.0
- tornado==6.4.2
- traitlets==5.14.3
- typeapi==2.2.4
- typer==0.15.2
- typing-extensions==4.13.0
- tzdata==2025.2
- uritemplate==4.1.1
- urllib3==1.26.20
- validators==0.34.0
- vcrpy==7.0.0
- watchdog==6.0.0
- wcwidth==0.2.13
- werkzeug==3.1.3
- wrapt==1.17.2
- xlrd==2.0.1
- xlwt==1.3.0
- xmltodict==0.14.2
- yapf==0.43.0
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/frictionless-py
| [
"tests/test_field.py::test_field_with_example_set[None]",
"tests/test_field.py::test_field_with_example_set[42]",
"tests/test_field.py::test_field_with_example_set[foo]",
"tests/test_schema.py::test_schema_valid_examples",
"tests/test_schema.py::test_schema_invalid_example"
] | [] | [
"tests/test_field.py::test_field",
"tests/test_field.py::test_field_defaults",
"tests/test_field.py::test_field_read_cell",
"tests/test_field.py::test_field_read_cell_string_missing_values",
"tests/test_field.py::test_field_read_cell_number_missingValues",
"tests/test_field.py::test_field_standard_specs_properties[create_descriptor0]",
"tests/test_field.py::test_field_standard_specs_properties[create_descriptor1]",
"tests/test_field.py::test_field_description_html",
"tests/test_field.py::test_field_description_html_multiline",
"tests/test_field.py::test_field_description_html_not_set",
"tests/test_field.py::test_field_description_text",
"tests/test_field.py::test_field_description_text_plain",
"tests/test_field.py::test_field_constraint_field_type[constraints0-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints1-array-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints2-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints3-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints4-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints5-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints6-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints7-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints8-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints9-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints10-object-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints11-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints12-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints13-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints14-yearmonth-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints15-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints16-array-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints17-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints18-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints19-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints20-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints21-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints22-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints23-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints24-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints25-object-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints26-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints27-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints28-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints29-yearmonth-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints30-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints31-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints32-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints33-date-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints34-datetime-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints35-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints36-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints37-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints38-integer-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints39-number-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints40-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints41-string-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints42-time-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints43-year-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints44-yearmonth-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints45-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints46-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints47-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints48-date-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints49-datetime-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints50-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints51-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints52-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints53-integer-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints54-number-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints55-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints56-string-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints57-time-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints58-year-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints59-yearmonth-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints60-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints61-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints62-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints63-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints64-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints65-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints66-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints67-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints68-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints69-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints70-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints71-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints72-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints73-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints74-yearmonth-False]",
"tests/test_field.py::test_field_read_cell_required",
"tests/test_field.py::test_field_read_cell_minLength",
"tests/test_field.py::test_field_read_cell_maxLength",
"tests/test_field.py::test_field_read_cell_minimum",
"tests/test_field.py::test_field_read_cell_maximum",
"tests/test_field.py::test_field_read_cell_pattern",
"tests/test_field.py::test_field_read_cell_enum",
"tests/test_field.py::test_field_read_cell_multiple_constraints",
"tests/test_field.py::test_field_to_copy",
"tests/test_schema.py::test_schema",
"tests/test_schema.py::test_schema_extract_metadata_error",
"tests/test_schema.py::test_schema_metadata_invalid",
"tests/test_schema.py::test_schema_descriptor",
"tests/test_schema.py::test_schema_descriptor_path",
"tests/test_schema.py::test_schema_descriptor_url",
"tests/test_schema.py::test_schema_read_cells",
"tests/test_schema.py::test_schema_read_cells_null_values",
"tests/test_schema.py::test_schema_read_cells_too_short",
"tests/test_schema.py::test_schema_read_cells_too_long",
"tests/test_schema.py::test_schema_read_cells_wrong_type",
"tests/test_schema.py::test_schema_missing_values",
"tests/test_schema.py::test_schema_fields",
"tests/test_schema.py::test_schema_get_field",
"tests/test_schema.py::test_schema_get_field_error_not_found",
"tests/test_schema.py::test_schema_update_field",
"tests/test_schema.py::test_schema_has_field",
"tests/test_schema.py::test_schema_remove_field",
"tests/test_schema.py::test_schema_remove_field_error_not_found",
"tests/test_schema.py::test_schema_field_names",
"tests/test_schema.py::test_schema_primary_key",
"tests/test_schema.py::test_schema_foreign_keys",
"tests/test_schema.py::test_schema_add_then_remove_field",
"tests/test_schema.py::test_schema_primary_foreign_keys_as_array",
"tests/test_schema.py::test_schema_primary_foreign_keys_as_string",
"tests/test_schema.py::test_schema_metadata_valid",
"tests/test_schema.py::test_schema_metadata_not_valid",
"tests/test_schema.py::test_schema_metadata_not_valid_multiple_errors",
"tests/test_schema.py::test_schema_metadata_not_valid_multiple_errors_with_pk",
"tests/test_schema.py::test_schema_metadata_error_message",
"tests/test_schema.py::test_schema_standard_specs_properties[create_descriptor0]",
"tests/test_schema.py::test_schema_standard_specs_properties[create_descriptor1]",
"tests/test_schema.py::test_schema_descriptor_expand",
"tests/test_schema.py::test_schema_to_copy",
"tests/test_schema.py::test_schema_to_json",
"tests/test_schema.py::test_schema_to_yaml",
"tests/test_schema.py::test_schema_from_jsonschema",
"tests/test_schema.py::test_schema_metadata_bad_schema_format",
"tests/test_schema.py::test_schema_field_date_format_issue_177",
"tests/test_schema.py::test_schema_field_time_format_issue_177",
"tests/test_schema.py::test_schema_add_remove_field_issue_218",
"tests/test_schema.py::test_schema_not_supported_type_issue_goodatbles_304"
] | [] | MIT License | 12,245 | 462 | [
"frictionless/field.py",
"frictionless/schema.py"
] |
alephdata__servicelayer-60 | ebde2a96658c9ecda9a7f9048cd106ef580dda5b | 2022-02-17 11:13:08 | ebde2a96658c9ecda9a7f9048cd106ef580dda5b | diff --git a/servicelayer/worker.py b/servicelayer/worker.py
index 939a3a3..daa0be9 100644
--- a/servicelayer/worker.py
+++ b/servicelayer/worker.py
@@ -1,5 +1,6 @@
import signal
import logging
+import sys
from threading import Thread
from banal import ensure_list
from abc import ABC, abstractmethod
@@ -23,8 +24,10 @@ class Worker(ABC):
self.exit_code = 0
def _handle_signal(self, signal, frame):
- log.warning("Shutting down worker (signal %s)", signal)
+ log.warning(f"Shutting down worker (signal {signal})")
self.exit_code = int(signal)
+ # Exit eagerly without waiting for current task to finish running
+ sys.exit(self.exit_code)
def handle_safe(self, task):
try:
| Worker doesn't exit on KeyboardInterrupt when running in multi-threaded mode | alephdata/servicelayer | diff --git a/tests/test_worker.py b/tests/test_worker.py
index 60e14a9..f4af193 100644
--- a/tests/test_worker.py
+++ b/tests/test_worker.py
@@ -1,4 +1,5 @@
from unittest import TestCase
+import pytest
from servicelayer.cache import get_fakeredis
from servicelayer.jobs import Job
@@ -34,8 +35,6 @@ class WorkerTest(TestCase):
assert job.is_done()
assert worker.exit_code == 0, worker.exit_code
assert worker.test_done == 1, worker.test_done
- worker._handle_signal(5, None)
- assert worker.exit_code == 5, worker.exit_code
worker.retry(task)
worker.run(blocking=False)
assert job.is_done()
@@ -45,3 +44,9 @@ class WorkerTest(TestCase):
worker.run(blocking=False)
assert job.is_done()
assert worker.exit_code == 0, worker.exit_code
+ try:
+ worker._handle_signal(5, None)
+ except SystemExit as exc:
+ assert exc.code == 5, exc.code
+ with pytest.raises(SystemExit) as exc: # noqa
+ worker._handle_signal(5, None)
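A standalone sketch of the shutdown behaviour introduced by the patch above: the handler records the signal and raises `SystemExit` right away instead of waiting for the current task (the class here is a placeholder, not the real servicelayer Worker):

```python
import signal
import sys

class TinyWorker:  # illustrative stand-in
    def __init__(self):
        self.exit_code = 0
        signal.signal(signal.SIGINT, self._handle_signal)
        signal.signal(signal.SIGTERM, self._handle_signal)

    def _handle_signal(self, signum, frame):
        self.exit_code = int(signum)
        # sys.exit raises SystemExit in the main thread immediately,
        # so Ctrl-C is not swallowed while worker threads keep running.
        sys.exit(self.exit_code)
```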
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.18 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y libicu-dev"
],
"python": "3.8",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backports.tarfile==1.2.0
banal==1.0.6
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.6.1
cryptography==44.0.2
Deprecated==1.2.18
docutils==0.20.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
fakeredis==1.7.0
greenlet==3.1.1
id==1.5.0
idna==3.10
importlib_metadata==8.5.0
importlib_resources==6.4.5
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
jmespath==1.0.1
keyring==25.5.0
markdown-it-py==3.0.0
MarkupSafe==2.1.5
mdurl==0.1.2
more-itertools==10.5.0
moto==5.0.28
nh3==0.2.21
normality==2.5.0
packaging @ file:///croot/packaging_1720101850331/work
pluggy @ file:///tmp/build/80754af9/pluggy_1648042571233/work
pycparser==2.22
Pygments==2.19.1
pytest @ file:///croot/pytest_1717793244625/work
pytest-cov==5.0.0
pytest-mock==3.14.0
python-dateutil==2.9.0.post0
PyYAML==6.0.2
readme_renderer==43.0
redis==4.0.2
requests==2.32.3
requests-toolbelt==1.0.0
responses==0.25.7
rfc3986==2.0.0
rich==14.0.0
s3transfer==0.11.4
SecretStorage==3.3.3
-e git+https://github.com/alephdata/servicelayer.git@ebde2a96658c9ecda9a7f9048cd106ef580dda5b#egg=servicelayer
six==1.17.0
sortedcontainers==2.4.0
SQLAlchemy==2.0.40
structlog==21.5.0
text-unidecode==1.3
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
twine==6.1.0
typing_extensions==4.13.0
urllib3==1.26.20
Werkzeug==3.0.6
wrapt==1.17.2
xmltodict==0.14.2
zipp==3.20.2
| name: servicelayer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py38h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.1=py38h06a4308_0
- pip=24.2=py38h06a4308_0
- pluggy=1.0.0=py38h06a4308_1
- pytest=7.4.4=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py38h06a4308_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- banal==1.0.6
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.6.1
- cryptography==44.0.2
- deprecated==1.2.18
- docutils==0.20.1
- fakeredis==1.7.0
- greenlet==3.1.1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.5.0
- importlib-resources==6.4.5
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- jmespath==1.0.1
- keyring==25.5.0
- markdown-it-py==3.0.0
- markupsafe==2.1.5
- mdurl==0.1.2
- more-itertools==10.5.0
- moto==5.0.28
- nh3==0.2.21
- normality==2.5.0
- pycparser==2.22
- pygments==2.19.1
- pytest-cov==5.0.0
- pytest-mock==3.14.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- readme-renderer==43.0
- redis==4.0.2
- requests==2.32.3
- requests-toolbelt==1.0.0
- responses==0.25.7
- rfc3986==2.0.0
- rich==14.0.0
- s3transfer==0.11.4
- secretstorage==3.3.3
- six==1.17.0
- sortedcontainers==2.4.0
- sqlalchemy==2.0.40
- structlog==21.5.0
- text-unidecode==1.3
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==1.26.20
- werkzeug==3.0.6
- wrapt==1.17.2
- xmltodict==0.14.2
- zipp==3.20.2
prefix: /opt/conda/envs/servicelayer
| [
"tests/test_worker.py::WorkerTest::test_run"
] | [] | [] | [] | MIT License | 12,247 | 218 | [
"servicelayer/worker.py"
] |
|
duo-labs__py_webauthn-120 | 381aad2548a884b54bc928dd3fbc37dbe68514c9 | 2022-02-17 23:06:53 | 381aad2548a884b54bc928dd3fbc37dbe68514c9 | diff --git a/webauthn/helpers/structs.py b/webauthn/helpers/structs.py
index 9ae33bc..74274ab 100644
--- a/webauthn/helpers/structs.py
+++ b/webauthn/helpers/structs.py
@@ -44,6 +44,7 @@ class BytesLike(bytes):
- https://github.com/duo-labs/py_webauthn/issues/110
- https://github.com/duo-labs/py_webauthn/issues/113
"""
+
@classmethod
def __get_validators__(cls):
yield cls.validate
@@ -52,6 +53,8 @@ class BytesLike(bytes):
def validate(cls, v):
if isinstance(v, bytes):
return v
+ elif isinstance(v, memoryview):
+ return v.tobytes()
else:
return strict_bytes_validator(v)
| Pydantic/bytes validation doesn't accept Python memoryview
The latest version, going back to pydantic with a custom 'bytes' validation class, still throws an error during runtime validation in the case of peewee + PostgreSQL.
In this case, a Python memoryview is being returned; it should also be accepted as bytes. It seems like it would be very simple to extend the BytesLike validator to handle this case.
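A minimal sketch of a pydantic v1-style custom type that accepts `memoryview` alongside `bytes`, in the spirit of the patch above (the class name is illustrative):

```python
class BytesOrMemoryview(bytes):  # illustrative name
    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        if isinstance(v, bytes):       # also covers bytes subclasses
            return v
        if isinstance(v, memoryview):  # e.g. what some database drivers return
            return v.tobytes()
        raise TypeError("bytes or memoryview required")
```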
Note: the original mongodb issue - which subclassed 'bytes' works fine.... | duo-labs/py_webauthn | diff --git a/tests/test_bytes_subclass_support.py b/tests/test_bytes_subclass_support.py
index c6c9783..3fe50d6 100644
--- a/tests/test_bytes_subclass_support.py
+++ b/tests/test_bytes_subclass_support.py
@@ -7,19 +7,19 @@ from webauthn.helpers.structs import (
)
-class CustomBytes(bytes):
- def __new__(cls, data: str):
- data_bytes = base64url_to_bytes(data)
- self = bytes.__new__(cls, memoryview(data_bytes).tobytes())
- return self
-
-
class TestWebAuthnBytesSubclassSupport(TestCase):
def test_handles_bytes_subclasses(self) -> None:
"""
Ensure the library can support being used in projects that might work with values that are
subclasses of `bytes`. Let's embrace Python's duck-typing, not shy away from it
"""
+
+ class CustomBytes(bytes):
+ def __new__(cls, data: str):
+ data_bytes = base64url_to_bytes(data)
+ self = bytes.__new__(cls, memoryview(data_bytes).tobytes())
+ return self
+
verification = verify_authentication_response(
credential=AuthenticationCredential(
id="fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA",
@@ -50,3 +50,43 @@ class TestWebAuthnBytesSubclassSupport(TestCase):
)
assert verification.new_sign_count == 7
+
+ def test_handles_memoryviews(self) -> None:
+ """
+ Ensure support for libraries that leverage memoryviews
+ """
+
+ def base64url_to_memoryview(data: str) -> memoryview:
+ data_bytes = base64url_to_bytes(data)
+ return memoryview(data_bytes)
+
+ verification = verify_authentication_response(
+ credential=AuthenticationCredential(
+ id="fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA",
+ raw_id=base64url_to_memoryview(
+ "fq9Nj0nS24B5y6Pkw_h3-9GEAEA3-0LBPxE2zvTdLjDqtSeCSNYFe9VMRueSpAZxT3YDc6L1lWXdQNwI-sVNYrefEcRR1Nsb_0jpHE955WEtFud2xxZg3MvoLMxHLet63i5tajd1fHtP7I-00D6cehM8ZWlLp2T3s9lfZgVIFcA"
+ ),
+ response=AuthenticatorAssertionResponse(
+ authenticator_data=base64url_to_memoryview(
+ "SZYN5YgOjGh0NBcPZHZgW4_krrmihjLHmVzzuoMdl2MBAAAABw"
+ ),
+ client_data_json=base64url_to_memoryview(
+ "eyJ0eXBlIjoid2ViYXV0aG4uZ2V0IiwiY2hhbGxlbmdlIjoiZVo0ZWVBM080ank1Rkl6cURhU0o2SkROR3UwYkJjNXpJMURqUV9rTHNvMVdOcWtHNms1bUNZZjFkdFFoVlVpQldaV2xaa3pSNU1GZWVXQ3BKUlVOWHciLCJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjUwMDAiLCJjcm9zc09yaWdpbiI6ZmFsc2V9"
+ ),
+ signature=base64url_to_memoryview(
+ "RRWV8mYDRvK7YdQgdtZD4pJ2dh1D_IWZ_D6jsZo6FHJBoenbj0CVT5nA20vUzlRhN4R6dOEUHmUwP1F8eRBhBg"
+ ),
+ ),
+ ),
+ expected_challenge=base64url_to_memoryview(
+ "eZ4eeA3O4jy5FIzqDaSJ6JDNGu0bBc5zI1DjQ_kLso1WNqkG6k5mCYf1dtQhVUiBWZWlZkzR5MFeeWCpJRUNXw"
+ ),
+ expected_rp_id="localhost",
+ expected_origin="http://localhost:5000",
+ credential_public_key=base64url_to_memoryview(
+ "pAEBAycgBiFYIMz6_SUFLiDid2Yhlq0YboyJ-CDrIrNpkPUGmJp4D3Dp"
+ ),
+ credential_current_sign_count=3,
+ )
+
+ assert verification.new_sign_count == 7
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 1
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.4.0
black==21.9b0
cbor2==5.4.2.post1
cffi==1.15.0
click==8.0.3
cryptography==36.0.1
exceptiongroup==1.2.2
iniconfig==2.1.0
mccabe==0.6.1
mypy==0.910
mypy-extensions==0.4.3
packaging==24.2
pathspec==0.9.0
platformdirs==2.4.0
pluggy==1.5.0
pycodestyle==2.8.0
pycparser==2.20
pydantic==1.9.0
pyflakes==2.4.0
pyOpenSSL==22.0.0
pytest==8.3.5
regex==2021.10.8
six==1.16.0
toml==0.10.2
tomli==1.2.1
typing-extensions==3.10.0.2
-e git+https://github.com/duo-labs/py_webauthn.git@381aad2548a884b54bc928dd3fbc37dbe68514c9#egg=webauthn
| name: py_webauthn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.4.0
- black==21.9b0
- cbor2==5.4.2.post1
- cffi==1.15.0
- click==8.0.3
- cryptography==36.0.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mccabe==0.6.1
- mypy==0.910
- mypy-extensions==0.4.3
- packaging==24.2
- pathspec==0.9.0
- platformdirs==2.4.0
- pluggy==1.5.0
- pycodestyle==2.8.0
- pycparser==2.20
- pydantic==1.9.0
- pyflakes==2.4.0
- pyopenssl==22.0.0
- pytest==8.3.5
- regex==2021.10.8
- six==1.16.0
- toml==0.10.2
- tomli==1.2.1
- typing-extensions==3.10.0.2
prefix: /opt/conda/envs/py_webauthn
| [
"tests/test_bytes_subclass_support.py::TestWebAuthnBytesSubclassSupport::test_handles_memoryviews"
] | [] | [
"tests/test_bytes_subclass_support.py::TestWebAuthnBytesSubclassSupport::test_handles_bytes_subclasses"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,252 | 209 | [
"webauthn/helpers/structs.py"
] |
|
YosefLab__Hotspot-14 | f571277fb669880ba593fd53834618a57f2abf82 | 2022-02-18 04:33:03 | 8fbabb04cbb809c298f46ad3a9978d535ee7d5ac | diff --git a/hotspot/hotspot.py b/hotspot/hotspot.py
index a858f66..459f24b 100644
--- a/hotspot/hotspot.py
+++ b/hotspot/hotspot.py
@@ -126,7 +126,7 @@ class Hotspot:
self.module_scores = None
def create_knn_graph(
- self, weighted_graph=False, n_neighbors=30, neighborhood_factor=3):
+ self, weighted_graph=False, n_neighbors=30, neighborhood_factor=3, approx_neighbors=True):
"""Create's the KNN graph and graph weights
The resulting matrices containing the neighbors and weights are
@@ -143,12 +143,16 @@ class Hotspot:
relative to the distances within the neighborhood. The weight for
a cell with a distance d will decay as exp(-d/D) where D is the distance
to the `n_neighbors`/`neighborhood_factor`-th neighbor.
+ approx_neighbors: bool
+ Use approximate nearest neighbors or exact scikit-learn neighbors. Only
+ when hotspot initialized with `latent`.
"""
if self.latent is not None:
neighbors, weights = neighbors_and_weights(
self.latent, n_neighbors=n_neighbors,
- neighborhood_factor=neighborhood_factor)
+ neighborhood_factor=neighborhood_factor,
+ approx_neighbors=approx_neighbors)
elif self.tree is not None:
if weighted_graph:
raise ValueError("When using `tree` as the metric space, `weighted_graph=True` is not supported")
diff --git a/hotspot/knn.py b/hotspot/knn.py
index d26bdf2..2831dc1 100644
--- a/hotspot/knn.py
+++ b/hotspot/knn.py
@@ -4,9 +4,10 @@ import pandas as pd
from math import ceil
from numba import jit
from tqdm import tqdm
+from pynndescent import NNDescent
-def neighbors_and_weights(data, n_neighbors=30, neighborhood_factor=3):
+def neighbors_and_weights(data, n_neighbors=30, neighborhood_factor=3, approx_neighbors=True):
"""
Computes nearest neighbors and associated weights for data
Uses euclidean distance between rows of `data`
@@ -23,9 +24,14 @@ def neighbors_and_weights(data, n_neighbors=30, neighborhood_factor=3):
"""
coords = data.values
- nbrs = NearestNeighbors(n_neighbors=n_neighbors,
- algorithm="ball_tree").fit(coords)
- dist, ind = nbrs.kneighbors()
+
+ if approx_neighbors:
+ index = NNDescent(coords, n_neighbors=n_neighbors)
+ ind, dist = index.neighbor_graph
+ else:
+ nbrs = NearestNeighbors(n_neighbors=n_neighbors,
+ algorithm="ball_tree").fit(coords)
+ dist, ind = nbrs.kneighbors()
weights = compute_weights(
dist, neighborhood_factor=neighborhood_factor)
diff --git a/setup.py b/setup.py
index 0aee543..fde915f 100644
--- a/setup.py
+++ b/setup.py
@@ -22,6 +22,7 @@ setup(
'tqdm>=4.32.2',
'statsmodels>=0.9.0',
'scikit-learn>=0.21.2',
+ 'pynndescent>=0.5',
],
extras_require=dict(
test=['pytest>=5.0.0'],
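A minimal sketch of the two neighbor back-ends the patch above switches between (toy data; the real function goes on to compute decay weights from the distances):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors
from pynndescent import NNDescent

coords = np.random.rand(1000, 10)

# Exact neighbors (previous behaviour; slow for very large datasets)
dist, ind = NearestNeighbors(n_neighbors=30, algorithm="ball_tree").fit(coords).kneighbors()

# Approximate neighbors via NN-descent (new default added by the patch)
ind_approx, dist_approx = NNDescent(coords, n_neighbors=30).neighbor_graph
```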
| import knn graph?
Would it be useful to have a function to import and use the neighbor graph computed in scanpy (fast for big datasets)?
The function `hs.create_knn_graph` seems a bit slow on very big datasets, and if the NN graph is already being computed for other analyses in scanpy, it makes sense to reuse it here.
Genuinely very useful tool :) | YosefLab/Hotspot | diff --git a/tests/test_bernoulli.py b/tests/test_bernoulli.py
index d908b2a..82530f0 100644
--- a/tests/test_bernoulli.py
+++ b/tests/test_bernoulli.py
@@ -25,7 +25,7 @@ def test_local_autocorrelation_centered():
umi_counts = pd.Series(umi_counts)
neighbors, weights = neighbors_and_weights(
- latent, n_neighbors=30, neighborhood_factor=3
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=False
)
neighbors = neighbors.values
weights = weights.values
@@ -145,7 +145,7 @@ def test_local_correlation_centered():
gene_p, Gmean, EG
)
assert math.isclose(
- EstdG, Gstd, rel_tol=5e-2
+ EstdG, Gstd, rel_tol=1e-1
), "stdG is off for gene_p={}, Actual={:.2f}, Expected={:.2f}".format(
gene_p, Gstd, EstdG
)
diff --git a/tests/test_local_stats.py b/tests/test_local_stats.py
index 1efea09..21f9b00 100644
--- a/tests/test_local_stats.py
+++ b/tests/test_local_stats.py
@@ -22,24 +22,33 @@ def test_moments_fast():
latent = sim_data.sim_latent(N_CELLS, N_DIM)
latent = pd.DataFrame(latent)
- neighbors, weights = neighbors_and_weights(
- latent, n_neighbors=30, neighborhood_factor=3)
+ def _compute(neighbors, weights, rel_tol=1e-12):
+ neighbors = neighbors.values
+ weights = weights.values
+
+ weights = make_weights_non_redundant(neighbors, weights)
- neighbors = neighbors.values
- weights = weights.values
+ # Just generate random muX, muY, x2, y2
+ muX = np.random.rand(N_CELLS) * 2 + 2
+ x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
- weights = make_weights_non_redundant(neighbors, weights)
+ EG, EG2 = compute_moments_weights_slow(
+ muX, x2, neighbors, weights
+ )
+ EG_fast, EG2_fast = compute_moments_weights(
+ muX, x2, neighbors, weights
+ )
- # Just generate random muX, muY, x2, y2
- muX = np.random.rand(N_CELLS) * 2 + 2
- x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
+ assert math.isclose(EG, EG_fast, rel_tol=rel_tol)
+ assert math.isclose(EG2, EG2_fast, rel_tol=rel_tol)
+
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=False)
- EG, EG2 = compute_moments_weights_slow(
- muX, x2, neighbors, weights
- )
- EG_fast, EG2_fast = compute_moments_weights(
- muX, x2, neighbors, weights
- )
+ _compute(neighbors, weights)
+
+
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=True)
- assert math.isclose(EG, EG_fast, rel_tol=1e-12)
- assert math.isclose(EG2, EG2_fast, rel_tol=1e-12)
+ _compute(neighbors, weights, rel_tol=0.005)
diff --git a/tests/test_local_stats_pairs.py b/tests/test_local_stats_pairs.py
index f688831..67213de 100644
--- a/tests/test_local_stats_pairs.py
+++ b/tests/test_local_stats_pairs.py
@@ -27,30 +27,37 @@ def test_pairs():
latent = sim_data.sim_latent(N_CELLS, N_DIM)
latent = pd.DataFrame(latent)
- neighbors, weights = neighbors_and_weights(
- latent, n_neighbors=30, neighborhood_factor=3)
+ def _compute(neighbors, weights, rel_tol=1e-12):
+ neighbors = neighbors.values
+ weights = weights.values
- neighbors = neighbors.values
- weights = weights.values
+ weights = make_weights_non_redundant(neighbors, weights)
- weights = make_weights_non_redundant(neighbors, weights)
+ # Just generate random muX, muY, x2, y2
+ muX = np.random.rand(N_CELLS) * 2 + 2
+ muY = np.random.rand(N_CELLS) * .5 + 1
+ x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
+ y2 = (np.random.rand(N_CELLS) * 3 + .5)**2 + muY**2
- # Just generate random muX, muY, x2, y2
- muX = np.random.rand(N_CELLS) * 2 + 2
- muY = np.random.rand(N_CELLS) * .5 + 1
- x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
- y2 = (np.random.rand(N_CELLS) * 3 + .5)**2 + muY**2
+ EG, EG2 = compute_moments_weights_pairs_slow(
+ muX, x2, muY, y2, neighbors, weights
+ )
+ EG_fast, EG2_fast = compute_moments_weights_pairs(
+ muX, x2, muY, y2, neighbors, weights
+ )
- EG, EG2 = compute_moments_weights_pairs_slow(
- muX, x2, muY, y2, neighbors, weights
- )
- EG_fast, EG2_fast = compute_moments_weights_pairs(
- muX, x2, muY, y2, neighbors, weights
- )
+ assert math.isclose(EG, EG_fast, rel_tol=rel_tol)
+ assert math.isclose(EG2, EG2_fast, rel_tol=rel_tol)
+
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=False)
+
+ _compute(neighbors, weights)
- assert math.isclose(EG, EG_fast, rel_tol=1e-12)
- assert math.isclose(EG2, EG2_fast, rel_tol=1e-12)
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=True)
+ _compute(neighbors, weights, rel_tol=0.005)
def test_pairs_fast():
"""
@@ -65,29 +72,39 @@ def test_pairs_fast():
latent = sim_data.sim_latent(N_CELLS, N_DIM)
latent = pd.DataFrame(latent)
- neighbors, weights = neighbors_and_weights(
- latent, n_neighbors=30, neighborhood_factor=3)
+ def _compute(neighbors, weights, rel_tol=1e-12):
+ neighbors = neighbors.values
+ weights = weights.values
- neighbors = neighbors.values
- weights = weights.values
+ weights = make_weights_non_redundant(neighbors, weights)
- weights = make_weights_non_redundant(neighbors, weights)
+ # Just generate random muX, muY, x2, y2
+ muX = np.random.rand(N_CELLS) * 2 + 2
+ muY = np.random.rand(N_CELLS) * .5 + 1
+ x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
+ y2 = (np.random.rand(N_CELLS) * 3 + .5)**2 + muY**2
- # Just generate random muX, muY, x2, y2
- muX = np.random.rand(N_CELLS) * 2 + 2
- muY = np.random.rand(N_CELLS) * .5 + 1
- x2 = (np.random.rand(N_CELLS) * 2 + 1)**2 + muX**2
- y2 = (np.random.rand(N_CELLS) * 3 + .5)**2 + muY**2
+ EG, EG2 = compute_moments_weights_pairs(
+ muX, x2, muY, y2, neighbors, weights
+ )
+ EG_fast, EG2_fast = compute_moments_weights_pairs_fast(
+ muX, x2, muY, y2, neighbors, weights
+ )
- EG, EG2 = compute_moments_weights_pairs(
- muX, x2, muY, y2, neighbors, weights
- )
- EG_fast, EG2_fast = compute_moments_weights_pairs_fast(
- muX, x2, muY, y2, neighbors, weights
- )
+ assert math.isclose(EG, EG_fast, rel_tol=rel_tol)
+ assert math.isclose(EG2, EG2_fast, rel_tol=rel_tol)
+
+
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=False)
+
+ _compute(neighbors, weights)
+
+ neighbors, weights = neighbors_and_weights(
+ latent, n_neighbors=30, neighborhood_factor=3, approx_neighbors=False)
+
+ _compute(neighbors, weights, rel_tol=0.005)
- assert math.isclose(EG, EG_fast, rel_tol=1e-12)
- assert math.isclose(EG2, EG2_fast, rel_tol=1e-12)
def test_pairs_std():
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install git+https://github.com/yoseflab/Hotspot.git",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"matplotlib>=3.0.0",
"numba>=0.43.1",
"numpy>=1.16.4",
"seaborn>=0.9.0",
"scipy>=1.2.1",
"pandas>=0.24.0",
"tqdm>=4.32.2",
"statsmodels>=0.9.0",
"scikit-learn>=0.21.2",
"pytest>=5.0.0"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | anndata==0.10.9
array_api_compat==1.11.2
contourpy==1.3.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
h5py==3.13.0
hotspotsc @ git+https://github.com/yoseflab/Hotspot.git@351826c8704925003290692d573c9bbe6b6cbc4a
importlib_resources==6.5.2
iniconfig==2.1.0
joblib==1.4.2
kiwisolver==1.4.7
llvmlite==0.43.0
matplotlib==3.9.4
natsort==8.4.0
numba==0.60.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
patsy==1.0.1
pillow==11.1.0
pluggy==1.5.0
pynndescent==0.5.13
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
scikit-learn==1.6.1
scipy==1.13.1
seaborn==0.13.2
six==1.17.0
statsmodels==0.14.4
threadpoolctl==3.6.0
tomli==2.2.1
tqdm==4.67.1
tzdata==2025.2
zipp==3.21.0
| name: Hotspot
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- anndata==0.10.9
- array-api-compat==1.11.2
- contourpy==1.3.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- h5py==3.13.0
- hotspotsc==1.1.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- joblib==1.4.2
- kiwisolver==1.4.7
- llvmlite==0.43.0
- matplotlib==3.9.4
- natsort==8.4.0
- numba==0.60.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- patsy==1.0.1
- pillow==11.1.0
- pluggy==1.5.0
- pynndescent==0.5.13
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scikit-learn==1.6.1
- scipy==1.13.1
- seaborn==0.13.2
- six==1.17.0
- statsmodels==0.14.4
- threadpoolctl==3.6.0
- tomli==2.2.1
- tqdm==4.67.1
- tzdata==2025.2
- zipp==3.21.0
prefix: /opt/conda/envs/Hotspot
| [
"tests/test_bernoulli.py::test_local_autocorrelation_centered",
"tests/test_local_stats.py::test_moments_fast",
"tests/test_local_stats_pairs.py::test_pairs",
"tests/test_local_stats_pairs.py::test_pairs_fast"
] | [] | [
"tests/test_bernoulli.py::test_local_correlation_centered",
"tests/test_local_stats_pairs.py::test_pairs_std",
"tests/test_local_stats_pairs.py::test_local_correlation"
] | [] | MIT License | 12,253 | 809 | [
"hotspot/hotspot.py",
"hotspot/knn.py",
"setup.py"
] |
|
joergbuchwald__ogs6py-44 | 91fd62ff3b856aac6c2f98724199acded0183e2f | 2022-02-18 21:52:54 | d47c3e01b09eb02bc7a3d98776f834803261fa4a | diff --git a/examples/example_THM.py b/examples/example_THM.py
index 6430ec3..632afcc 100644
--- a/examples/example_THM.py
+++ b/examples/example_THM.py
@@ -39,9 +39,32 @@ model.media.add_property(medium_id="0",
name="density",
type="Linear",
reference_value="999.1",
- variable_name="temperature",
- reference_condition="273.15",
- slope="-4e-4")
+ independent_variables={"temperature": {
+ "reference_condition":273.15,
+ "slope":-4e-4},
+ "phase_pressure": {
+ "reference_condition": 1e5,
+ "slope": 1e-20
+ }})
+# Alternative density models using property type Exponential or Function
+#model.media.add_property(medium_id="0",
+# phase_type="AqueousLiquid",
+# name="density",
+# type="Exponential",
+# reference_value="999.1",
+# offset="0.0",
+# exponent={"variable_name": "temperature",
+# "reference_condition":273.15,
+# "factor":-4e-4})
+#model.media.add_property(medium_id="0",
+# phase_type="AqueousLiquid",
+# name="density",
+# type="Function",
+# expression="999.1",
+# dvalues={"temperature": {
+# "expression":0.0},
+# "phase_pressure": {
+# "expression": 0.0}})
model.media.add_property(medium_id="0",
phase_type="AqueousLiquid",
name="thermal_expansivity",
diff --git a/ogs6py/classes/media.py b/ogs6py/classes/media.py
index 0aa7e8f..a12ab79 100644
--- a/ogs6py/classes/media.py
+++ b/ogs6py/classes/media.py
@@ -22,6 +22,239 @@ class Media(build_tree.BuildTree):
'children': {}
}
}
+ self.properties = {"AverageMolarMass": [],
+ "BishopsSaturationCutoff": ["cutoff_value"],
+ "BishopsPowerLaw": ["exponent"],
+ "CapillaryPressureRegularizedVanGenuchten": ["exponent",
+ "p_b",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "CapillaryPressureVanGenuchten": ["exponent",
+ "maximum_capillary_pressure"
+ "p_b",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "ClausiusClapeyron": ["critical_pressure",
+ "critical_temperature",
+ "reference_pressure",
+ "reference_temperature",
+ "triple_pressure",
+ "triple_temperature"],
+ "Constant": ["value"],
+ "Curve" : ["curve", "independent_variable"],
+ "DupuitPermeability": ["parameter_name"],
+ "EffectiveThermalConductivityPorosityMixing": [],
+ "EmbeddedFracturePermeability": ["intrinsic_permeability",
+ "initial_aperture",
+ "mean_frac_distance",
+ "threshold_strain",
+ "fracture_normal",
+ "fracture_rotation_xy",
+ "fracture_rotation_yz"],
+ "Function": ["value"],
+ "Exponential": ["offset","reference_value"],
+ "GasPressureDependentPermeability": ["initial_permeability",
+ "a1", "a2",
+ "pressure_threshold",
+ "minimum_permeability",
+ "maximum_permeability"],
+ "IdealGasLaw": [],
+ "IdealGasLawBinaryMixture": [],
+ "KozenyCarmanModel": ["intitial_permeability", "initial_prosity"],
+ "Linear": ["reference_value"],
+ "LinearSaturationSwellingStress" : ["coefficient", "reference_saturation"],
+ "LinearWaterVapourLatentHeat" : [],
+ "OrthotropicEmbeddedFracturePermeability": ["intrinsic_permeability",
+ "mean_frac_distances",
+ "threshold_strains",
+ "fracture_normals",
+ "fracture_rotation_xy",
+ "fracture_rotation_yz",
+ "jacobian_factor"],
+ "Parameter": ["parameter_name"],
+ "PermeabilityMohrCoulombFailureIndexModel": ["cohesion",
+ "fitting_factor",
+ "friction_angle",
+ "initial_ppermeability",
+ "maximum_permeability",
+ "reference_permeability",
+ "tensile_strength_parameter"],
+ "PermeabilityOrthotropicPowerLaw": ["exponents",
+ "intrinsic_permeabilities"],
+ "PorosityFromMassBalance": ["initial_porosity",
+ "maximal_porosity",
+ "minimal_porosity"],
+ "RelPermBrooksCorey": ["lambda",
+ "min_relative_permeability"
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "RelPermBrooksCoreyNonwettingPhase": ["lambda",
+ "min_relative_permeability"
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "RelPermLiakopoulos": [],
+ "RelativePermeabilityNonWettingVanGenuchten": ["exponent",
+ "minimum_relative_permeability",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "RelativePermeabilityUdell": ["min_relative_permeability",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "RelativePermeabilityUdellNonwettingPhase": ["min_relative_permeability",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "RelativePermeabilityVanGenuchten": ["exponent",
+ "minimum_relative_permeability_liquid",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "SaturationBrooksCorey": ["entry_pressure",
+ "lambda",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "SaturationDependentSwelling": ["exponents",
+ "lower_saturation_limit",
+ "swelling_pressures",
+ "upper_saturation_limit"],
+ "SaturationDependentThermalConductivity": ["dry","wet"],
+ "SaturationExponential": ["exponent",
+ "maximum_capillary_pressure",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "SaturationLiakopoulos": [],
+ "SaturationVanGenuchten": ["exponent",
+ "p_b",
+ "residual_gas_saturation",
+ "residual_liquid_saturation"],
+ "SoilThermalConductivitySomerton": ["dry_thermal_conductivity",
+ "wet_thermal_conductivity"],
+ "StrainDependentPermeability": ["initial_permeability",
+ "b1", "b2", "b3",
+ "minimum_permeability",
+ "maximum_permeability"],
+ "TemperatureDependentDiffusion": ["activation_energy",
+ "reference_diffusion",
+ "reference_temperature"],
+ "TransportPorosityFromMassBalance": ["initial_porosity",
+ "maximal_porosity",
+ "minimal_porosity"],
+ "VapourDiffusionFEBEX": ["tortuosity"],
+ "VapourDiffusionPMQ": [],
+ "VermaPruessModel": ["critical_porosity",
+ "exponent",
+ "initial_permeability",
+ "initial_porosity"],
+ "WaterVapourDensity": [],
+ "WaterVapourLatentHeatWithCriticalTemperature": []
+ }
+
+ def _generate_generic_property(self, args):
+ property_parameters = {}
+ for parameter in self.properties[args["type"]]:
+ property_parameters[parameter] = {
+ 'tag': parameter,
+ 'text': args[parameter],
+ 'attr': {},
+ 'children': {}
+ }
+ return property_parameters
+ def _generate_linear_property(self, args):
+ property_parameters = {}
+ for parameter in self.properties[args["type"]]:
+ property_parameters[parameter] = {
+ 'tag': parameter,
+ 'text': args[parameter],
+ 'attr': {},
+ 'children': {}
+ }
+ for var, param in args["independent_variables"].items():
+ property_parameters[f"independent_variable{var}"] = {
+ 'tag': 'independent_variable',
+ 'text': '',
+ 'attr': {},
+ 'children': {}
+ }
+ indep_var = property_parameters[f"independent_variable{var}"]['children']
+ indep_var['variable_name'] = {
+ 'tag': 'variable_name',
+ 'text': var,
+ 'attr': {},
+ 'children': {}
+ }
+ attributes = ['reference_condition','slope']
+ for attrib in attributes:
+ indep_var[attrib] = {
+ 'tag': attrib,
+ 'text': str(param[attrib]),
+ 'attr': {},
+ 'children': {}
+ }
+ return property_parameters
+ def _generate_function_property(self, args):
+ property_parameters = {}
+ for parameter in self.properties[args["type"]]:
+ property_parameters[parameter] = {
+ 'tag': parameter,
+ 'text': "",
+ 'attr': {},
+ 'children': {}
+ }
+ property_parameters["value"]["children"]["expression"] = {
+ 'tag': "expression",
+ 'text': args["expression"],
+ 'attr': {},
+ 'children': {}
+ }
+ for dvar in args["dvalues"]:
+ property_parameters[f"dvalue{dvar}"] = {
+ 'tag': "dvalue",
+ 'text': "",
+ 'attr': {},
+ 'children': {}
+ }
+ property_parameters[f"dvalue{dvar}"]["children"]["variable_name"] = {
+ 'tag': "variable_name",
+ 'text': dvar,
+ 'attr': {},
+ 'children': {}
+ }
+ property_parameters[f"dvalue{dvar}"]["children"]["expression"] = {
+ 'tag': "expression",
+ 'text': args["dvalues"][dvar]["expression"],
+ 'attr': {},
+ 'children': {}
+ }
+ return property_parameters
+ def _generate_exponential_property(self, args):
+ property_parameters = {}
+ for parameter in self.properties[args["type"]]:
+ property_parameters[parameter] = {
+ 'tag': parameter,
+ 'text': args[parameter],
+ 'attr': {},
+ 'children': {}
+ }
+ property_parameters["exponent"] = {
+ 'tag': 'exponent',
+ 'text': '',
+ 'attr': {},
+ 'children': {}
+ }
+ indep_var = property_parameters["exponent"]['children']
+ indep_var['variable_name'] = {
+ 'tag': 'variable_name',
+ 'text': args["exponent"]["variable_name"],
+ 'attr': {},
+ 'children': {}
+ }
+ attributes = ['reference_condition','factor']
+ for attrib in attributes:
+ indep_var[attrib] = {
+ 'tag': attrib,
+ 'text': str(args["exponent"][attrib]),
+ 'attr': {},
+ 'children': {}
+ }
+ return property_parameters
def add_property(self, **args):
"""
@@ -106,76 +339,26 @@ class Media(build_tree.BuildTree):
'attr': {},
'children': {}
}
- phase[args['name']]['children']['name'] = {
- 'tag': 'name',
- 'text': args['name'],
- 'attr': {},
- 'children': {}
- }
- phase[args['name']]['children']['type'] = {
- 'tag': 'type',
- 'text': args['type'],
- 'attr': {},
- 'children': {}
- }
- if args['type'] == "Constant":
- phase[args['name']]['children']['value'] = {
- 'tag': 'value',
- 'text': args['value'],
- 'attr': {},
- 'children': {}
- }
- elif args['type'] == "Linear":
- phase[args['name']]['children']['reference_value'] = {
- 'tag': 'reference_value',
- 'text': args['reference_value'],
+ base_property_param = ["name", "type"]
+ for param in base_property_param:
+ phase[args['name']]['children'][param] = {
+ 'tag': param,
+ 'text': args[param],
'attr': {},
'children': {}
- }
- phase[args['name']]['children']['independent_variable'] = {
- 'tag': 'independent_variable',
- 'text': '',
- 'attr': {},
- 'children': {}
- }
- indep_var = phase[args['name']]['children']['independent_variable']['children']
- indep_var['variable_name'] = {
- 'tag': 'variable_name',
- 'text': args['variable_name'],
- 'attr': {},
- 'children': {}
- }
- indep_var['reference_condition'] = {
- 'tag': 'reference_condition',
- 'text': args['reference_condition'],
- 'attr': {},
- 'children': {}
- }
- indep_var['slope'] = {
- 'tag': 'slope',
- 'text': args['slope'],
- 'attr': {},
- 'children': {}
- }
- elif args['type'] == "Parameter":
- phase[args['name']]['children']['parameter'] = {
- 'tag': 'parameter_name',
- 'text': args['parameter_name'],
- 'attr': {},
- 'children': {}
- }
- elif args['type'] == "BishopsSaturationCutoff":
- phase[args['name']]['children']['cutoff_value'] = {
- 'tag': 'cutoff_value',
- 'text': args['cutoff_value'],
- 'attr': {},
- 'children': {}
- }
- elif args['type'] == "BishopsPowerLaw":
- phase[args['name']]['children']['exponent'] = {
- 'tag': 'exponent',
- 'text': args['exponent'],
- 'attr': {},
- 'children': {}
- }
-
+ }
+ try:
+ if args['type'] == "Linear":
+ phase[args['name']]['children'].update(self._generate_linear_property(args))
+ elif args['type'] == "Exponential":
+ phase[args['name']]['children'].update(self._generate_exponential_property(args))
+ elif args['type'] == "Function":
+ phase[args['name']]['children'].update(self._generate_function_property(args))
+ else:
+ phase[args['name']]['children'].update(self._generate_generic_property(args))
+ except KeyError:
+ print("Material property parameters incomplete for")
+ if "phase_type" in args:
+ print(f"Medium {args['medium_id']}->{args['phase_type']}->{args['name']}[{args['type']}]")
+ else:
+ print(f"Medium {args['medium_id']}->{args['name']}[{args['type']}]")
| extending add_property for new MPL models
Hi Joerg,
we want to use the following MPL model with OGS6PY. But it is not considered yet.
```python
model.media.add_property(medium_id="0",
name="permeability",
type="GasPressureDependentPermeability",
initial_permeability="1e-15",
a1="0.125",
a2="152",
pressure_threshold="3.2e6",
minimum_permeability="0.0" ,
maximum_permeability="8.0e-16")
```
This is actually how the model is translated
```xml
<property>
<name>permeability</name>
<type>GasPressureDependentPermeability</type>
</property>
```
and here is how the model should be:
```xml
<property>
<name>permeability</name>
<type>GasPressureDependentPermeability</type>
<initial_permeability>permeability0</initial_permeability>
<a1>0.125</a1>
<a2>152</a2>
<pressure_threshold>3.2e6</pressure_threshold>
<minimum_permeability>0.0</minimum_permeability>
<maximum_permeability>8.0e-16</maximum_permeability>
</property>
```
Could you please add this model into ogs6py?
Thanks, Eric | joergbuchwald/ogs6py | diff --git a/tests/test_ogs6py.py b/tests/test_ogs6py.py
index ee957e9..4f80bdc 100644
--- a/tests/test_ogs6py.py
+++ b/tests/test_ogs6py.py
@@ -68,13 +68,14 @@ class TestiOGS(unittest.TestCase):
type="Constant",
value="0.6")
model.media.add_property(medium_id="0",
- phase_type="AqueousLiquid",
- name="density",
- type="Linear",
- reference_value="999.1",
- variable_name="phase_pressure",
- reference_condition="1e5",
- slope="4.5999999999999996e-10")
+ phase_type="AqueousLiquid",
+ name="density",
+ type="Linear",
+ reference_value="999.1",
+ independent_variables={"phase_pressure": {
+ "reference_condition": "1e5",
+ "slope": "4.5999999999999996e-10"
+ }})
model.media.add_property(medium_id="0",
phase_type="AqueousLiquid",
name="thermal_expansivity",
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 0.35 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
lxml==5.3.1
numpy==2.0.2
-e git+https://github.com/joergbuchwald/ogs6py.git@91fd62ff3b856aac6c2f98724199acded0183e2f#egg=ogs6py
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tzdata==2025.2
| name: ogs6py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- lxml==5.3.1
- numpy==2.0.2
- pandas==2.2.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tzdata==2025.2
prefix: /opt/conda/envs/ogs6py
| [
"tests/test_ogs6py.py::TestiOGS::test_buildfromscratch"
] | [] | [
"tests/test_ogs6py.py::TestiOGS::test_add_block",
"tests/test_ogs6py.py::TestiOGS::test_add_entry",
"tests/test_ogs6py.py::TestiOGS::test_empty_replace",
"tests/test_ogs6py.py::TestiOGS::test_model_run",
"tests/test_ogs6py.py::TestiOGS::test_parallel_1_compare_serial_info",
"tests/test_ogs6py.py::TestiOGS::test_parallel_3_debug",
"tests/test_ogs6py.py::TestiOGS::test_remove_element",
"tests/test_ogs6py.py::TestiOGS::test_replace_block_by_include",
"tests/test_ogs6py.py::TestiOGS::test_replace_medium_property",
"tests/test_ogs6py.py::TestiOGS::test_replace_mesh",
"tests/test_ogs6py.py::TestiOGS::test_replace_parameter",
"tests/test_ogs6py.py::TestiOGS::test_replace_phase_property",
"tests/test_ogs6py.py::TestiOGS::test_replace_property_in_include",
"tests/test_ogs6py.py::TestiOGS::test_replace_text",
"tests/test_ogs6py.py::TestiOGS::test_serial_convergence_coupling_iteration_long",
"tests/test_ogs6py.py::TestiOGS::test_serial_convergence_newton_iteration_long",
"tests/test_ogs6py.py::TestiOGS::test_serial_critical",
"tests/test_ogs6py.py::TestiOGS::test_serial_time_vs_iterations",
"tests/test_ogs6py.py::TestiOGS::test_serial_warning_only"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,257 | 3,540 | [
"examples/example_THM.py",
"ogs6py/classes/media.py"
] |
|
PMEAL__OpenPNM-2333 | 007c521a4cb93d45beb26cb878b9b1ee1762a4f8 | 2022-02-24 16:46:45 | e4c9f799076c334904088df34b8f04da7802a08b | diff --git a/openpnm/models/geometry/pore_surface_area/_funcs.py b/openpnm/models/geometry/pore_surface_area/_funcs.py
index 877347415..b5cc123a2 100644
--- a/openpnm/models/geometry/pore_surface_area/_funcs.py
+++ b/openpnm/models/geometry/pore_surface_area/_funcs.py
@@ -42,11 +42,11 @@ def sphere(
"""
network = target.project.network
- R = target[pore_diameter] / 2
- Asurf = 4 * _np.pi * R**2
- Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
- Tsurf = _np.array([network[throat_cross_sectional_area][Ts].sum() for Ts in Tn])
- value = Asurf - Tsurf
+ R = network[pore_diameter] / 2
+ value = 4 * _np.pi * R**2
+ Tca = network[throat_cross_sectional_area]
+ _np.subtract.at(value, network.conns.flatten(), _np.repeat(Tca, repeats=2))
+ value = value[network.pores(target.name)]
return value
@@ -74,11 +74,10 @@ def circle(
"""
network = target.project.network
- R = target[pore_diameter] / 2
- Asurf = 2 * _np.pi * R
- Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
- Tsurf = _np.array([network[throat_cross_sectional_area][Ts].sum() for Ts in Tn])
- value = Asurf - Tsurf
+ value = _np.pi * network[pore_diameter]
+ Tca = network[throat_cross_sectional_area]
+ _np.subtract.at(value, network.conns.flatten(), _np.repeat(Tca, repeats=2))
+ value = value[network.pores(target.name)]
return value
@@ -101,10 +100,11 @@ def cube(
"""
network = target.project.network
- D = target[pore_diameter]
- Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
- Tsurf = _np.array([network[throat_cross_sectional_area][Ts].sum() for Ts in Tn])
- value = 6 * D**2 - Tsurf
+ D = network[pore_diameter]
+ value = 6.0 * D**2
+ Tca = network[throat_cross_sectional_area]
+ _np.subtract.at(value, network.conns.flatten(), _np.repeat(Tca, repeats=2))
+ value = value[network.pores(target.name)]
return value
@@ -127,8 +127,9 @@ def square(
"""
network = target.project.network
- D = target[pore_diameter]
- Tn = network.find_neighbor_throats(pores=target.Ps, flatten=False)
- Tsurf = _np.array([network[throat_cross_sectional_area][Ts].sum() for Ts in Tn])
- value = 4 * D - Tsurf
+ D = network[pore_diameter]
+ value = 4.0 * D
+ Tca = network[throat_cross_sectional_area]
+ _np.subtract.at(value, network.conns.flatten(), _np.repeat(Tca, repeats=2))
+ value = value[network.pores(target.name)]
return value
| pore_area models are needlessly slow
We're using list comprehensions in these models, but could use unbuffered vector operations:
``` python
import numpy as np
import openpnm as op
net = op.network.Cubic(shape=[3, 3])
net['pore.diameter'] = 1
net['throat.diameter'] = np.random.rand(net.Nt)*0.5
net['throat.length'] = 1
net['throat.cross_sectional_area'] = 4/3*np.pi*net['throat.diameter']**2
net['throat.surface_area'] = np.pi*net['throat.diameter']*net['throat.length']
# Find surface are of sphere
temp = 4*np.pi*(0.5*net['pore.diameter'])**2
# Subtract throat openings
a = net['throat.cross_sectional_area']
np.subtract.at(temp, net.conns.flatten(), np.hstack((a, a)))
# Re-add throat surface areas
a = net['throat.surface_area']/2
np.add.at(temp, net.conns.flatten(), np.hstack((a, a)))
``` | PMEAL/OpenPNM | diff --git a/tests/unit/models/geometry/PoreSurfaceAreaTest.py b/tests/unit/models/geometry/PoreSurfaceAreaTest.py
index e8e877fcd..fd01e628d 100644
--- a/tests/unit/models/geometry/PoreSurfaceAreaTest.py
+++ b/tests/unit/models/geometry/PoreSurfaceAreaTest.py
@@ -49,6 +49,33 @@ class PoreSurfaceAreaTest:
b = np.unique(self.geo['pore.surface_area'])
assert_allclose(a, b)
+ def test_circle_multi_geom(self):
+ self.net = op.network.Cubic(shape=[10, 1, 1], spacing=1.0)
+ self.geo1 = op.geometry.GenericGeometry(network=self.net,
+ pores=self.net.Ps[0:3],
+ throats=self.net.Ts[0:3])
+ self.geo2 = op.geometry.GenericGeometry(network=self.net,
+ pores=self.net.Ps[3:],
+ throats=self.net.Ts[3:])
+ self.geo1['pore.diameter'] = 1
+ self.geo1['throat.cross_sectional_area'] = 0.3
+ self.geo2['pore.diameter'] = 1
+ self.geo2['throat.cross_sectional_area'] = 0.1
+ self.geo2.add_model(propname='pore.surface_area',
+ model=mods.circle,
+ regen_mode='normal')
+ self.geo1.add_model(propname='pore.surface_area',
+ model=mods.circle,
+ regen_mode='normal')
+ a = np.array([2.84159265, 2.54159265, 2.54159265])
+ b = self.geo1['pore.surface_area']
+ c = np.array([2.74159265, 2.94159265, 2.94159265,
+ 2.94159265, 2.94159265, 2.94159265,
+ 3.04159265])
+ d = self.geo2['pore.surface_area']
+ assert_allclose(a, b)
+ assert_allclose(c, d)
+
if __name__ == '__main__':
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 2.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.8",
"reqs_path": [
"requirements/pip_requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
attr==0.3.2
attrs==25.3.0
auto-all==1.4.1
backcall==0.2.0
chemicals==1.3.3
contourpy==1.1.1
cycler==0.12.1
decorator==5.2.1
docrep==0.3.2
exceptiongroup==1.2.2
executing==2.2.0
flatdict==4.0.1
fluids==1.1.0
fonttools==4.56.0
gitdb==4.0.12
GitPython==3.1.44
h5py==3.11.0
imageio==2.35.1
importlib_metadata==8.5.0
importlib_resources==6.4.5
iniconfig==2.1.0
intel-cmplr-lib-ur==2025.1.0
intel-openmp==2025.1.0
ipython==8.12.3
jedi==0.19.2
json-tricks==3.17.3
jsonschema==4.23.0
jsonschema-specifications==2023.12.1
kiwisolver==1.4.7
lazy_loader==0.4
llvmlite==0.41.1
matplotlib==3.7.5
matplotlib-inline==0.1.7
mkl==2025.1.0
mpmath==1.3.0
networkx==3.1
numba==0.58.1
numpy==1.24.4
-e git+https://github.com/PMEAL/OpenPNM.git@007c521a4cb93d45beb26cb878b9b1ee1762a4f8#egg=openpnm
packaging==24.2
pandas==2.0.3
parso==0.8.4
pexpect==4.9.0
pickleshare==0.7.5
pillow==10.4.0
pkgutil_resolve_name==1.3.10
pluggy==1.5.0
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
Pygments==2.19.1
pypardiso==0.4.6
pyparsing==3.1.4
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
PyWavelets==1.4.1
referencing==0.35.1
rpds-py==0.20.1
scikit-image==0.21.0
scipy==1.10.1
six==1.17.0
smmap==5.0.2
stack-data==0.6.3
sympy==1.13.3
tbb==2022.1.0
tcmlib==1.3.0
terminaltables==3.1.10
tifffile==2023.7.10
tomli==2.2.1
tqdm==4.67.1
traitlets==5.14.3
traits==7.0.2
transforms3d==0.4.2
typing_extensions==4.13.0
tzdata==2025.2
umf==0.10.0
wcwidth==0.2.13
zipp==3.20.2
| name: OpenPNM
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- attr==0.3.2
- attrs==25.3.0
- auto-all==1.4.1
- backcall==0.2.0
- chemicals==1.3.3
- contourpy==1.1.1
- cycler==0.12.1
- decorator==5.2.1
- docrep==0.3.2
- exceptiongroup==1.2.2
- executing==2.2.0
- flatdict==4.0.1
- fluids==1.1.0
- fonttools==4.56.0
- gitdb==4.0.12
- gitpython==3.1.44
- h5py==3.11.0
- imageio==2.35.1
- importlib-metadata==8.5.0
- importlib-resources==6.4.5
- iniconfig==2.1.0
- intel-cmplr-lib-ur==2025.1.0
- intel-openmp==2025.1.0
- ipython==8.12.3
- jedi==0.19.2
- json-tricks==3.17.3
- jsonschema==4.23.0
- jsonschema-specifications==2023.12.1
- kiwisolver==1.4.7
- lazy-loader==0.4
- llvmlite==0.41.1
- matplotlib==3.7.5
- matplotlib-inline==0.1.7
- mkl==2025.1.0
- mpmath==1.3.0
- networkx==3.1
- numba==0.58.1
- numpy==1.24.4
- packaging==24.2
- pandas==2.0.3
- parso==0.8.4
- pexpect==4.9.0
- pickleshare==0.7.5
- pillow==10.4.0
- pkgutil-resolve-name==1.3.10
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pygments==2.19.1
- pypardiso==0.4.6
- pyparsing==3.1.4
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pywavelets==1.4.1
- referencing==0.35.1
- rpds-py==0.20.1
- scikit-image==0.21.0
- scipy==1.10.1
- six==1.17.0
- smmap==5.0.2
- stack-data==0.6.3
- sympy==1.13.3
- tbb==2022.1.0
- tcmlib==1.3.0
- terminaltables==3.1.10
- tifffile==2023.7.10
- tomli==2.2.1
- tqdm==4.67.1
- traitlets==5.14.3
- traits==7.0.2
- transforms3d==0.4.2
- typing-extensions==4.13.0
- tzdata==2025.2
- umf==0.10.0
- wcwidth==0.2.13
- zipp==3.20.2
prefix: /opt/conda/envs/OpenPNM
| [
"tests/unit/models/geometry/PoreSurfaceAreaTest.py::PoreSurfaceAreaTest::test_circle_multi_geom"
] | [] | [
"tests/unit/models/geometry/PoreSurfaceAreaTest.py::PoreSurfaceAreaTest::test_sphere",
"tests/unit/models/geometry/PoreSurfaceAreaTest.py::PoreSurfaceAreaTest::test_circle",
"tests/unit/models/geometry/PoreSurfaceAreaTest.py::PoreSurfaceAreaTest::test_cube",
"tests/unit/models/geometry/PoreSurfaceAreaTest.py::PoreSurfaceAreaTest::test_square"
] | [] | MIT License | 12,294 | 812 | [
"openpnm/models/geometry/pore_surface_area/_funcs.py"
] |
|
microsoft__debugpy-853 | 6134532263fa046ae231c14bc8bcd3f2191f8c06 | 2022-02-25 15:27:59 | 356f665325a645a628669acd82d2a33323a26e5b | sonarcloud[bot]: Please retry analysis of this Pull-Request directly on [SonarCloud](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853).
sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! 
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=BUG) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=BUG)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=VULNERABILITY) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=VULNERABILITY)
[](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=853&resolved=false&types=SECURITY_HOTSPOT) [](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=853&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=853&resolved=false&types=SECURITY_HOTSPOT)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=CODE_SMELL) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=853&resolved=false&types=CODE_SMELL)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=853&metric=new_coverage&view=list) [100.0% Coverage](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=853&metric=new_coverage&view=list)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=853&metric=new_duplicated_lines_density&view=list) [0.0% Duplication](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=853&metric=new_duplicated_lines_density&view=list)
| diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py
index 3cd4290f..6e41ae72 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py
@@ -1161,7 +1161,7 @@ def internal_evaluate_expression_json(py_db, request, thread_id):
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
- if context == 'repl':
+ if context == 'repl' and not py_db.is_output_redirected:
ctx = pydevd_io.redirect_stream_to_pydb_io_messages_context()
else:
ctx = NULL
diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py
index bda519b8..d1449a40 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py
@@ -389,11 +389,15 @@ class PyDevJsonCommandProcessor(object):
if bool(path_mappings):
pydevd_file_utils.setup_client_server_paths(path_mappings)
+ redirecting = args.get("isOutputRedirected")
if self._options.redirect_output:
py_db.enable_output_redirection(True, True)
+ redirecting = True
else:
py_db.enable_output_redirection(False, False)
+ py_db.is_output_redirected = redirecting
+
self.api.set_show_return_values(py_db, self._options.show_return_value)
if not self._options.break_system_exit_zero:
diff --git a/src/debugpy/_vendored/pydevd/pydevd.py b/src/debugpy/_vendored/pydevd/pydevd.py
index e729239d..4b9069b2 100644
--- a/src/debugpy/_vendored/pydevd/pydevd.py
+++ b/src/debugpy/_vendored/pydevd/pydevd.py
@@ -639,6 +639,9 @@ class PyDB(object):
self.show_return_values = False
self.remove_return_values_flag = False
self.redirect_output = False
+ # Note that besides the `redirect_output` flag, we also need to consider that someone
+ # else is already redirecting (i.e.: debugpy).
+ self.is_output_redirected = False
# this flag disables frame evaluation even if it's available
self.use_frame_eval = True
diff --git a/src/debugpy/adapter/clients.py b/src/debugpy/adapter/clients.py
index c1e79f63..d0b3fa14 100644
--- a/src/debugpy/adapter/clients.py
+++ b/src/debugpy/adapter/clients.py
@@ -217,6 +217,7 @@ class Client(components.Component):
arguments = request.arguments
if self.launcher:
+ redirecting = arguments.get("console") == "internalConsole"
if "RedirectOutput" in debug_options:
# The launcher is doing output redirection, so we don't need the
# server to do it, as well.
@@ -224,10 +225,14 @@ class Client(components.Component):
arguments["debugOptions"] = list(
debug_options - {"RedirectOutput"}
)
+ redirecting = True
if arguments.get("redirectOutput"):
arguments = dict(arguments)
del arguments["redirectOutput"]
+ redirecting = True
+
+ arguments["isOutputRedirected"] = redirecting
# pydevd doesn't send "initialized", and responds to the start request
# immediately, without waiting for "configurationDone". If it changes
| I see `print` outputs twice in Debug Console when launch config uses `"console": "internalConsole"`
Issue Type: <b>Bug</b>
Launch config:
```json
{
"name": "Python: Current File",
"type": "python",
"request": "launch",
"program": "${file}",
"console": "internalConsole",
}
```
Then debug any file.
In Debug Console, enter `print(2)`.
# Expected Behaviour
I see "2" (once)
## Expected vs. Actual Behaviour

# Diagnostic data
- Python version (& distribution if applicable, e.g. Anaconda): 3.10.0
- Type of virtual environment used (e.g. conda, venv, virtualenv, etc.): Global
- Value of the `python.languageServer` setting: Default
Extension version: 2021.12.1559732655
VS Code version: Code 1.63.2 (899d46d82c4c95423fb7e10e68eba52050e30ba3, 2021-12-15T09:40:02.816Z)
OS version: Windows_NT x64 10.0.19044
Restricted Mode: No
Remote OS version: Linux x64 5.3.18-lp152.44-default
<!-- generated by issue reporter --> | microsoft/debugpy | diff --git a/tests/debugpy/test_multiproc.py b/tests/debugpy/test_multiproc.py
index 3836f7e4..8ca2b6db 100644
--- a/tests/debugpy/test_multiproc.py
+++ b/tests/debugpy/test_multiproc.py
@@ -135,6 +135,7 @@ def test_multiprocessing(pyfile, target, run, start_method):
expected_child_config = expected_subprocess_config(parent_session)
child_config = parent_session.wait_for_next_event("debugpyAttach")
+ child_config.pop("isOutputRedirected", None)
assert child_config == expected_child_config
parent_session.proceed()
@@ -144,6 +145,7 @@ def test_multiprocessing(pyfile, target, run, start_method):
expected_grandchild_config = expected_subprocess_config(child_session)
grandchild_config = child_session.wait_for_next_event("debugpyAttach")
+ grandchild_config.pop("isOutputRedirected", None)
assert grandchild_config == expected_grandchild_config
with debug.Session(grandchild_config) as grandchild_session:
@@ -204,6 +206,7 @@ def test_subprocess(pyfile, target, run, subProcess):
expected_child_config = expected_subprocess_config(parent_session)
child_config = parent_session.wait_for_next_event("debugpyAttach")
+ child_config.pop("isOutputRedirected", None)
assert child_config == expected_child_config
parent_session.proceed()
diff --git a/tests/debugpy/test_output.py b/tests/debugpy/test_output.py
index 43b1ea9e..3b5de697 100644
--- a/tests/debugpy/test_output.py
+++ b/tests/debugpy/test_output.py
@@ -63,6 +63,43 @@ def test_with_tab_in_output(pyfile, target, run):
assert session.output("stdout").startswith("Hello\tWorld")
[email protected]("redirect_mode", ["internalConsole", "redirectOutput"])
+def test_redirect_output_and_eval(pyfile, target, run, redirect_mode):
+ @pyfile
+ def code_to_debug():
+ import debuggee
+ import sys
+
+ debuggee.setup()
+ sys.stdout.write("line\n")
+ () # @wait_for_output
+
+ with debug.Session() as session:
+ if redirect_mode == "redirectOutput":
+ session.config["redirectOutput"] = True
+ elif redirect_mode == "internalConsole":
+ session.config["console"] = "internalConsole"
+ else:
+ raise AssertionError("Unexpected: " + redirect_mode)
+
+ with run(session, target(code_to_debug)):
+ session.set_breakpoints(code_to_debug, all)
+
+ stop = session.wait_for_stop()
+ session.request(
+ "evaluate",
+ {
+ "expression": "sys.stdout.write('evaluated\\n')",
+ "frameId": stop.frame_id,
+ "context": "repl",
+ },
+ )
+
+ session.request_continue()
+
+ assert session.output("stdout") == "line\nevaluated\n"
+
+
@pytest.mark.parametrize("run", runners.all)
@pytest.mark.parametrize("redirect", ["enabled", "disabled"])
def test_redirect_output(pyfile, target, run, redirect):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 4
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"black",
"flake8",
"tox",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"tests/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asgiref==3.8.1
black==25.1.0
blinker==1.9.0
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
colorama==0.4.6
coverage==7.8.0
-e git+https://github.com/microsoft/debugpy.git@6134532263fa046ae231c14bc8bcd3f2191f8c06#egg=debugpy
distlib==0.3.9
Django==4.2.20
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==7.2.0
Flask==3.1.0
gevent==24.11.1
greenlet==3.1.1
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
psutil==7.0.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-timeout==2.3.1
pytest-xdist==3.6.1
requests==2.32.3
sqlparse==0.5.3
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
Werkzeug==3.1.3
zipp==3.21.0
zope.event==5.0
zope.interface==7.2
| name: debugpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asgiref==3.8.1
- black==25.1.0
- blinker==1.9.0
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- colorama==0.4.6
- coverage==7.8.0
- debugpy==1.5.1+26.g61345322
- distlib==0.3.9
- django==4.2.20
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==7.2.0
- flask==3.1.0
- gevent==24.11.1
- greenlet==3.1.1
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- itsdangerous==2.2.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- psutil==7.0.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-timeout==2.3.1
- pytest-xdist==3.6.1
- requests==2.32.3
- sqlparse==0.5.3
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- werkzeug==3.1.3
- zipp==3.21.0
- zope-event==5.0
- zope-interface==7.2
prefix: /opt/conda/envs/debugpy
| [
"tests/debugpy/test_output.py::test_redirect_output_and_eval[program-launch-redirectOutput]",
"tests/debugpy/test_output.py::test_redirect_output_and_eval[program-launch-internalConsole]"
] | [
"tests/debugpy/test_output.py::test_with_no_output[program-attach_pid]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-attach_pid]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-attach_pid]"
] | [
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(api)-False]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-launch-None]",
"tests/debugpy/test_multiproc.py::test_multiprocessing[program-attach_listen(cli)-fork]",
"tests/debugpy/test_multiproc.py::test_multiprocessing[program-attach_connect(cli)-fork]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-launch-True]",
"tests/debugpy/test_multiproc.py::test_multiprocessing[program-attach_listen(api)-fork]",
"tests/debugpy/test_multiproc.py::test_multiprocessing[program-launch-fork]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(api)-None]",
"tests/debugpy/test_multiproc.py::test_multiprocessing[program-attach_connect(api)-fork]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(api)-False]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-launch-False]",
"tests/debugpy/test_multiproc.py::test_autokill[program-launch(integratedTerminal)]",
"tests/debugpy/test_multiproc.py::test_autokill_nodebug[program-launch(externalTerminal)]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(api)-True]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(cli)-None]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(cli)-True]",
"tests/debugpy/test_multiproc.py::test_autokill_nodebug[program-launch(internalConsole)]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(cli)-None]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(cli)-True]",
"tests/debugpy/test_multiproc.py::test_autokill_nodebug[program-launch(integratedTerminal)]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(api)-None]",
"tests/debugpy/test_multiproc.py::test_autokill[program-launch(externalTerminal)]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_listen(cli)-False]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(cli)-False]",
"tests/debugpy/test_multiproc.py::test_argv_quoting[program-attach_listen(api)]",
"tests/debugpy/test_multiproc.py::test_autokill[program-launch(internalConsole)]",
"tests/debugpy/test_multiproc.py::test_subprocess[program-attach_connect(api)-True]",
"tests/debugpy/test_multiproc.py::test_echo_and_shell[program-attach_listen(api)]",
"tests/debugpy/test_multiproc.py::test_echo_and_shell[program-launch]",
"tests/debugpy/test_multiproc.py::test_echo_and_shell[program-attach_connect(api)]",
"tests/debugpy/test_multiproc.py::test_argv_quoting[program-attach_listen(cli)]",
"tests/debugpy/test_multiproc.py::test_echo_and_shell[program-attach_listen(cli)]",
"tests/debugpy/test_multiproc.py::test_argv_quoting[program-launch]",
"tests/debugpy/test_multiproc.py::test_echo_and_shell[program-attach_connect(cli)]",
"tests/debugpy/test_multiproc.py::test_argv_quoting[program-attach_connect(api)]",
"tests/debugpy/test_multiproc.py::test_argv_quoting[program-attach_connect(cli)]",
"tests/debugpy/test_output.py::test_with_no_output[program-attach_listen(cli)]",
"tests/debugpy/test_output.py::test_with_no_output[program-launch(internalConsole)]",
"tests/debugpy/test_multiproc.py::test_subprocess_unobserved[program--attach_connect(api)]",
"tests/debugpy/test_multiproc.py::test_subprocess_unobserved[program--attach_connect(cli)]",
"tests/debugpy/test_output.py::test_with_tab_in_output[program-launch]",
"tests/debugpy/test_multiproc.py::test_subprocess_unobserved[program-wait-attach_connect(cli)]",
"tests/debugpy/test_output.py::test_with_no_output[program-attach_connect(api)]",
"tests/debugpy/test_multiproc.py::test_subprocess_unobserved[program-wait-attach_connect(api)]",
"tests/debugpy/test_output.py::test_with_no_output[program-launch(integratedTerminal)]",
"tests/debugpy/test_output.py::test_with_no_output[program-launch(externalTerminal)]",
"tests/debugpy/test_output.py::test_with_no_output[program-attach_listen(api)]",
"tests/debugpy/test_output.py::test_with_no_output[program-attach_connect(cli)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-launch(integratedTerminal)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-launch(internalConsole)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-attach_listen(cli)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-attach_connect(cli)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-launch(externalTerminal)]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-attach_connect(api)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-launch(internalConsole)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-launch(integratedTerminal)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-attach_listen(api)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-attach_listen(cli)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-attach_connect(api)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-launch(externalTerminal)]",
"tests/debugpy/test_output.py::test_redirect_output[program-disabled-attach_connect(cli)]",
"tests/debugpy/test_output.py::test_non_ascii_output[program-launch]",
"tests/debugpy/test_output.py::test_redirect_output[program-enabled-attach_listen(api)]"
] | [] | MIT License | 12,301 | 959 | [
"src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py",
"src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py",
"src/debugpy/_vendored/pydevd/pydevd.py",
"src/debugpy/adapter/clients.py"
] |
urllib3__urllib3-2571 | b19e55edf97c2a259b90e59014c21d7f51de62f6 | 2022-02-25 16:47:11 | aa73abc7b22a4a67e0ee957f5a3031109f73d3d9 | pquentin: Nice, the tests passed. Do we need a changelog entry, or are we going to pick it up from 1.26.x?
sethmlarson: @pquentin Yeah, we're backporting this one, no changelog needed. | diff --git a/src/urllib3/connection.py b/src/urllib3/connection.py
index c53d3ca9..4cc72bab 100644
--- a/src/urllib3/connection.py
+++ b/src/urllib3/connection.py
@@ -447,17 +447,16 @@ class HTTPSConnection(HTTPConnection):
self._connecting_to_proxy = bool(self.proxy)
sock: Union[socket.socket, "ssl.SSLSocket"]
- sock = self._new_conn()
+ self.sock = sock = self._new_conn()
hostname: str = self.host
tls_in_tls = False
if self._is_using_tunnel():
if self.tls_in_tls_required:
- sock = self._connect_tls_proxy(hostname, sock)
+ self.sock = sock = self._connect_tls_proxy(hostname, sock)
tls_in_tls = True
self._connecting_to_proxy = False
- self.sock = sock
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
| socket leaked when ssl_wrap_socket fails
### Subject
socket leaked when ssl_wrap_socket fails
### Environment
```
>>> import platform
>>> import urllib3
>>>
>>> print("OS", platform.platform())
OS Linux-5.4.0-100-generic-x86_64-with-glibc2.31
>>> print("Python", platform.python_version())
Python 3.9.10
>>> print("urllib3", urllib3.__version__)
urllib3 1.26.8
```
### Steps to Reproduce
```python
import pytest
import urllib3
def test_requests_leak():
with pytest.raises(Exception):
with urllib3.PoolManager(ca_certs=__file__) as http:
http.request("GET", "https://google.com")
```
### Expected Behavior
no ResourceWarning
### Actual Behavior
```
:
def unraisable_exception_runtest_hook() -> Generator[None, None, None]:
with catch_unraisable_exception() as cm:
yield
if cm.unraisable:
if cm.unraisable.err_msg is not None:
err_msg = cm.unraisable.err_msg
else:
err_msg = "Exception ignored in"
msg = f"{err_msg}: {cm.unraisable.object!r}\n\n"
msg += "".join(
traceback.format_exception(
cm.unraisable.exc_type,
cm.unraisable.exc_value,
cm.unraisable.exc_traceback,
)
)
> warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))
E pytest.PytestUnraisableExceptionWarning: Exception ignored in: <socket.socket fd=-1, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=6>
E
E Traceback (most recent call last):
E File "/home/graingert/projects/requests-leak/test_requests_leak.py", line 8, in test_requests_leak
E http.request("GET", "https://google.com")
E ResourceWarning: unclosed <socket.socket fd=14, family=AddressFamily.AF_INET, type=SocketKind.SOCK_STREAM, proto=6, laddr=('192.168.99.58', 43618), raddr=('172.217.16.238', 443)>
```
| urllib3/urllib3 | diff --git a/test/with_dummyserver/test_https.py b/test/with_dummyserver/test_https.py
index 7d498fa6..1a6d6c40 100644
--- a/test/with_dummyserver/test_https.py
+++ b/test/with_dummyserver/test_https.py
@@ -329,6 +329,23 @@ class TestHTTPS(HTTPSDummyServerTestCase):
or "self signed certificate in certificate chain" in str(e.value.reason)
), f"Expected 'certificate verify failed', instead got: {e.value.reason!r}"
+ def test_wrap_socket_failure_resource_leak(self) -> None:
+ with HTTPSConnectionPool(
+ self.host,
+ self.port,
+ cert_reqs="CERT_REQUIRED",
+ ca_certs=self.bad_ca_path,
+ ssl_minimum_version=self.tls_version(),
+ ) as https_pool:
+ conn = https_pool._get_conn()
+ try:
+ with pytest.raises(ssl.SSLError):
+ conn.connect()
+
+ assert conn.sock
+ finally:
+ conn.close()
+
def test_verified_without_ca_certs(self) -> None:
# default is cert_reqs=None which is ssl.CERT_NONE
with HTTPSConnectionPool(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 1.26 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cffi==1.17.1
click==8.1.8
coverage==6.3.1
cryptography==35.0.0
flaky==3.7.0
freezegun==1.5.1
idna==3.10
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pluggy==1.5.0
py==1.11.0
pycparser==2.22
PySocks==1.7.1
pytest==7.0.0
pytest-freezegun==0.4.2
pytest-timeout==2.1.0
python-dateutil==2.9.0.post0
six==1.17.0
tomli==2.2.1
tornado==6.1
towncrier @ git+https://github.com/twisted/towncrier.git@3af0c53126f04d983fe7bf8117622c68b51b4476
trustme==0.9.0
-e git+https://github.com/urllib3/urllib3.git@b19e55edf97c2a259b90e59014c21d7f51de62f6#egg=urllib3
zipp==3.21.0
| name: urllib3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cffi==1.17.1
- click==8.1.8
- coverage==6.3.1
- cryptography==35.0.0
- flaky==3.7.0
- freezegun==1.5.1
- idna==3.10
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pluggy==1.5.0
- py==1.11.0
- pycparser==2.22
- pysocks==1.7.1
- pytest==7.0.0
- pytest-freezegun==0.4.2
- pytest-timeout==2.1.0
- python-dateutil==2.9.0.post0
- six==1.17.0
- tomli==2.2.1
- tornado==6.1
- towncrier==24.8.0.dev0
- trustme==0.9.0
- urllib3==2.0.0.dev0
- zipp==3.21.0
prefix: /opt/conda/envs/urllib3
| [
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_wrap_socket_failure_resource_leak",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_wrap_socket_failure_resource_leak"
] | [
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_version_is_deprecated"
] | [
"test/with_dummyserver/test_https.py::TestHTTPS::test_no_ssl",
"test/with_dummyserver/test_https.py::TestHTTPS::test_verify_none_and_bad_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS::test_set_cert_default_cert_required",
"test/with_dummyserver/test_https.py::TestHTTPS::test_default_ssl_context_ssl_min_max_versions",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_simple",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_dotted_fqdn",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_client_intermediate",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_client_no_intermediate",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_client_key_password",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_client_encrypted_key_requires_password",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verified",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verified_with_context",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_context_combines_with_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ca_dir_verified",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_invalid_common_name",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verified_with_bad_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verified_without_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_no_ssl",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_unverified_ssl",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_unverified_with_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_hostname_false",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_specific_hostname",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_server_hostname",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_fingerprint_md5",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_fingerprint_sha1",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_fingerprint_sha256",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_assert_invalid_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verify_none_and_bad_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_verify_none_and_good_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_good_fingerprint_and_hostname_mismatch",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_tunnel",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_enhanced_ssl_connection",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_correct_system_time",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_wrong_system_time",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_set_ssl_version_to_tls_version",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_set_cert_default_cert_required",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_tls_protocol_name_of_socket",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_version_with_protocol_tls_or_client_not_deprecated[None]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_version_with_protocol_tls_or_client_not_deprecated[_SSLMethod.PROTOCOL_TLS]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_version_with_protocol_tls_or_client_not_deprecated[_SSLMethod.PROTOCOL_TLS_CLIENT]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_no_tls_version_deprecation_with_ssl_context",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_tls_version_maximum_and_minimum",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_sslkeylogfile",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_sslkeylogfile_empty[None]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_sslkeylogfile_empty[]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_alpn_default",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_default_ssl_context_ssl_min_max_versions",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_2::test_ssl_context_ssl_version_uses_ssl_min_max_versions",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_simple",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_dotted_fqdn",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_client_intermediate",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_client_no_intermediate",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_client_key_password",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_client_encrypted_key_requires_password",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verified",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verified_with_context",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_context_combines_with_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ca_dir_verified",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_invalid_common_name",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verified_with_bad_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verified_without_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_no_ssl",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_unverified_ssl",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_unverified_with_ca_certs",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_hostname_false",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_specific_hostname",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_server_hostname",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_fingerprint_md5",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_fingerprint_sha1",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_fingerprint_sha256",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_assert_invalid_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verify_none_and_bad_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_verify_none_and_good_fingerprint",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_good_fingerprint_and_hostname_mismatch",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_tunnel",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_enhanced_ssl_connection",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_correct_system_time",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_wrong_system_time",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_set_ssl_version_to_tls_version",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_set_cert_default_cert_required",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_tls_protocol_name_of_socket",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_version_with_protocol_tls_or_client_not_deprecated[None]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_version_with_protocol_tls_or_client_not_deprecated[_SSLMethod.PROTOCOL_TLS]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_ssl_version_with_protocol_tls_or_client_not_deprecated[_SSLMethod.PROTOCOL_TLS_CLIENT]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_no_tls_version_deprecation_with_ssl_context",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_tls_version_maximum_and_minimum",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_sslkeylogfile",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_sslkeylogfile_empty[None]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_sslkeylogfile_empty[]",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_alpn_default",
"test/with_dummyserver/test_https.py::TestHTTPS_TLSv1_3::test_default_ssl_context_ssl_min_max_versions",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_can_validate_san[localhost]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_can_validate_san[127.0.0.1]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_can_validate_san[::1]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_common_name_without_san_fails[localhost]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_common_name_without_san_fails[127.0.0.1]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_common_name_without_san_fails[::1]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_common_name_without_san_with_different_common_name",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[localhost-True]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[localhost-False]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[127.0.0.1-True]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[127.0.0.1-False]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[::1-True]",
"test/with_dummyserver/test_https.py::TestHTTPS_Hostname::test_hostname_checks_common_name_respected[::1-False]",
"test/with_dummyserver/test_https.py::TestHTTPS_IPV4SAN::test_can_validate_ip_san",
"test/with_dummyserver/test_https.py::TestHTTPS_IPV6SAN::test_can_validate_ipv6_san[::1]",
"test/with_dummyserver/test_https.py::TestHTTPS_IPV6SAN::test_can_validate_ipv6_san[[::1]]"
] | [] | MIT License | 12,302 | 239 | [
"src/urllib3/connection.py"
] |
lark-parser__lark-1120 | 5772e1878729fe07d50fe3b80b8f360ac8f8d050 | 2022-02-26 19:24:45 | 501689455b00ff643b7901994dcb6d2a92d4412e | diff --git a/lark/utils.py b/lark/utils.py
index 77252b6..d7bab6f 100644
--- a/lark/utils.py
+++ b/lark/utils.py
@@ -137,9 +137,10 @@ def get_regexp_width(expr):
# we manually test for the most important info (whether the empty string is matched)
c = regex.compile(regexp_final)
if c.match('') is None:
- return 1, sre_constants.MAXREPEAT
+ # MAXREPEAT is a none pickable subclass of int, therefore needs to be converted to enable caching
+ return 1, int(sre_constants.MAXREPEAT)
else:
- return 0, sre_constants.MAXREPEAT
+ return 0, int(sre_constants.MAXREPEAT)
###}
| Caching does not work with recursive pattern
**Describe the bug**
I tried to leverage caching to speed up the loading time of my grammar, but it didn't work, so I started to dig into the code and found that it can't load the pickled file
```shell
Traceback (most recent call last):
...
File ".../lark/lark.py", line 326, in __init__
raise e
File ".../lark/lark.py", line 321, in __init__
cached_parser_data = pickle.load(f)
TypeError: __new__() missing 1 required positional argument: 'name'
```
I could trace it down to an issue with `MAXREPEAT`, and it happens when a recursive pattern is used.
**To Reproduce**
```python
from lark import Lark
def main():
json_grammar = r"""
start: element*
element: value
?value: "null" -> null
| QUOTED_INTERPOLATION -> string
QUOTED_INTERPOLATION: /'(?R)'/
%import common.NEWLINE
%import common.WS_INLINE
%ignore NEWLINE
%ignore WS_INLINE
"""
json_parser = Lark(json_grammar, start="start", parser="lalr", regex=True, cache=True)
tree = json_parser.parse(
"""
null
"""
)
print(tree.pretty())
if __name__ == "__main__":
main()
```
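For context, a minimal stand-alone sketch of the underlying problem (an editorial illustration based on the traceback above and on the `int(...)` conversion in the patch, not part of the original report): `sre_constants.MAXREPEAT` is an `int` subclass whose constructor requires an extra `name` argument, so a pickle round-trip of the cached parser data blows up, while a plain `int` with the same value round-trips cleanly.
```python
import pickle
import sre_constants  # a deprecated alias of re._constants on newer Pythons, but still importable

# MAXREPEAT is an int subclass (_NamedIntConstant) whose __new__ needs a `name`
# argument, so the round-trip fails (at load time on the interpreter from the
# traceback above; newer interpreters may refuse already at dump time).
try:
    pickle.loads(pickle.dumps(sre_constants.MAXREPEAT))
except TypeError as exc:
    print(exc)  # e.g. __new__() missing 1 required positional argument: 'name'

# Casting to a plain int keeps the numeric value and pickles cleanly, which is
# what the patch does with int(sre_constants.MAXREPEAT).
assert pickle.loads(pickle.dumps(int(sre_constants.MAXREPEAT))) == sre_constants.MAXREPEAT
```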
| lark-parser/lark | diff --git a/tests/test_cache.py b/tests/test_cache.py
index dd67435..16a35bd 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import
+import logging
from unittest import TestCase, main
from lark import Lark, Tree, Transformer
@@ -140,6 +141,24 @@ class TestCache(TestCase):
res = parser.parse("ab")
self.assertEqual(res, Tree('startab', [Tree('expr', ['a', 'b'])]))
+ def test_recursive_pattern(self):
+ g = """
+ start: recursive+
+ recursive: /\w{3}\d{3}(?R)?/
+ """
+
+ assert len(self.mock_fs.files) == 0
+ Lark(g, parser="lalr", regex=True, cache=True)
+ assert len(self.mock_fs.files) == 1
+
+ with self.assertLogs("lark", level="ERROR") as cm:
+ Lark(g, parser='lalr', regex=True, cache=True)
+ assert len(self.mock_fs.files) == 1
+ # need to add an error log, because 'self.assertNoLogs' was added in Python 3.10
+ logging.getLogger('lark').error("dummy message")
+ # should only have the dummy log
+ self.assertCountEqual(cm.output, ["ERROR:lark:dummy message"])
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[regex,nearley,atomic_cache]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | atomicwrites==1.4.1
exceptiongroup==1.2.2
iniconfig==2.1.0
Js2Py==0.74
-e git+https://github.com/lark-parser/lark.git@5772e1878729fe07d50fe3b80b8f360ac8f8d050#egg=lark
packaging==24.2
pluggy==1.5.0
pyjsparser==2.7.1
pytest==8.3.5
regex==2024.11.6
six==1.17.0
tomli==2.2.1
tzlocal==5.3.1
| name: lark
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- atomicwrites==1.4.1
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- js2py==0.74
- packaging==24.2
- pluggy==1.5.0
- pyjsparser==2.7.1
- pytest==8.3.5
- regex==2024.11.6
- six==1.17.0
- tomli==2.2.1
- tzlocal==5.3.1
prefix: /opt/conda/envs/lark
| [
"tests/test_cache.py::TestCache::test_recursive_pattern"
] | [] | [
"tests/test_cache.py::TestCache::test_automatic_naming",
"tests/test_cache.py::TestCache::test_custom_lexer",
"tests/test_cache.py::TestCache::test_imports",
"tests/test_cache.py::TestCache::test_inline",
"tests/test_cache.py::TestCache::test_options",
"tests/test_cache.py::TestCache::test_simple"
] | [] | MIT License | 12,306 | 194 | [
"lark/utils.py"
] |
|
pytorch__ignite-2496 | 727150e82fc3f5478717a6d0ea08c80db29b0e10 | 2022-02-27 15:37:48 | a7246e118cf2846b03f4872a1b78adf09e23f4bc | sdesrozis: @yuta0821 Thanks very much for this work. I have to think about the API though. The `use_legacy` arg is for BC (so to be removed one day), but the internal-only `keep_first_lr` makes me a little doubtful. This arg is only required when using ConcatScheduler; could we do something smarter? Anyway, it works and that is pretty cool. Let's just think about it. I suppose that @vfdev-5 would have an opinion on that.
yuta0821: @sdesrozis Yes, I agree; I don't want to add more args without careful thought. However, I haven't yet figured out how to control the `lr` value of an `LRScheduler` bound to a `ConcatScheduler` without using `keep_first_lr`. Originally, this was implemented by adjusting the `milestones_values` of `PiecewiseLinear`, but with the change in this PR that no longer works. I'll see if there's a smarter way to do this.
sdesrozis: My thoughts are that the target would be to have the following code working fine:
```python
from torch.optim.lr_scheduler import StepLR
scheduler1 = StepLR(optimizer, step_size=3, gamma=0.1)
scheduler1 = LRScheduler(scheduler1)
scheduler2 = StepLR(optimizer, step_size=3, gamma=0.01)
scheduler2 = LRScheduler(scheduler2)
scheduler = ConcatScheduler(schedulers=[scheduler1, scheduler2], durations=[4, ])
```
Whether or not `use_legacy` is used for `scheduler2`, it should produce the same output. It means `ConcatScheduler` should enable `keep_first_lr` of `LRScheduler` internally, without any user-facing API. The minimal approach would be to enable it via a method rather than at init:
```python
scheduler2 = LRScheduler(scheduler2)
scheduler2.keep_first_lr = True
```
I don't know yet...
yuta0821: @sdesrozis I'd like to confirm the behavior of the following code using `use_legacy=True`. Is this output right?
```python
from torch.optim.lr_scheduler import StepLR
optimizer = torch.optim.SGD([tensor], lr=0.01)
torch_scheduler1 = StepLR(optimizer, step_size=3, gamma=0.1)
scheduler1 = LRScheduler(torch_scheduler1, use_legacy=True)
torch_scheduler2 = StepLR(optimizer, step_size=3, gamma=0.01)
scheduler2 = LRScheduler(torch_scheduler2, use_legacy=True)
scheduler = ConcatScheduler(schedulers=[scheduler1, scheduler2], durations=[4, ])
def dummy_update(engine, batch):
pass
trainer = Engine(dummy_update)
@trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"])
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
_ = trainer.run([0] * 9, max_epochs=1)
```
```
0.01
0.01
0.01
0.001
0.001
0.001
0.001
1e-05
1e-05
```
> Whether or not `use_legacy` is used for the schedulers, it should produce the same output. It means `ConcatScheduler` should enable `keep_first_lr` of `LRScheduler` internally, without any user-facing API.
OK, I understand what the message means. Thank you for the clear explanation. Indeed, we must enable `keep_first_lr` of `LRScheduler` internally without any user-facing API.
vfdev-5: > @sdesrozis I'd like to confirm the behavior of the following code using `use_legacy=True`. Is this output right?
>
> ```python
> from torch.optim.lr_scheduler import StepLR
>
> optimizer = torch.optim.SGD([tensor], lr=0.01)
> torch_scheduler1 = StepLR(optimizer, step_size=3, gamma=0.1)
> scheduler1 = LRScheduler(torch_scheduler1, use_legacy=True)
>
> torch_scheduler2 = StepLR(optimizer, step_size=3, gamma=0.01)
> scheduler2 = LRScheduler(torch_scheduler2, use_legacy=True)
>
> scheduler = ConcatScheduler(schedulers=[scheduler1, scheduler2], durations=[4, ])
>
> def dummy_update(engine, batch):
> pass
> trainer = Engine(dummy_update)
>
> @trainer.on(Events.ITERATION_COMPLETED)
> def print_lr():
> print(optimizer.param_groups[0]["lr"])
> trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
>
> _ = trainer.run([0] * 9, max_epochs=1)
> ```
>
> ```
> 0.01
> 0.01
> 0.01
> 0.001
> 0.001
> 0.001
> 0.001
> 1e-05
> 1e-05
> ```
@yuta0821 the output looks correct to me. We have
```
--- scheduler 1
0.01
0.01
0.01
0.001
--- scheduler 2
0.001
0.001
0.001
1e-05
1e-05
```
What happens if using `use_legacy=False` and attaching to the appropriate event?
yuta0821: @vfdev-5
Sorry for the late reply.
> What happens if using use_legacy=False and attaching to the appropriate event?
If `trainer.add_event_handler` is attached at the proper position, the behavior with `use_legacy=False` becomes as follows.
With `keep_first_lr`, the behavior of this code is the same as with `use_legacy=True`.
```python
from torch.optim.lr_scheduler import StepLR
optimizer = torch.optim.SGD([tensor], lr=0.01)
torch_scheduler1 = StepLR(optimizer, step_size=3, gamma=0.1)
scheduler1 = LRScheduler(torch_scheduler1, use_legacy=False)
torch_scheduler2 = StepLR(optimizer, step_size=3, gamma=0.01)
scheduler2 = LRScheduler(torch_scheduler2, use_legacy=False)
scheduler = ConcatScheduler(schedulers=[scheduler1, scheduler2], durations=[4, ])
def dummy_update(engine, batch):
pass
trainer = Engine(dummy_update)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"])
_ = trainer.run([0] * 9, max_epochs=1)
```
```
--- scheduler 1
0.01
0.01
0.01
0.001
--- scheduler 2
0.001
0.001
0.001
1e-05
1e-05
```
vfdev-5: @yuta0821 Sorry for the delay on this PR; I'm still missing a complete understanding of the problem here. I'll try to figure out exactly what happens on my side and try to suggest a way to handle it. Adding an argument for internal usage only is not a good design.
yuta0821: @vfdev-5
> @yuta0821 Sorry for the delay on this PR; I'm still missing a complete understanding of the problem here. I'll try to figure out exactly what happens on my side and try to suggest a way to handle it. Adding an argument for internal usage only is not a good design.
> Thanks for your patience and for working on the PR!
No, sir, not at all. Thanks a lot for your help. If I can contribute in any way, I'll be happy to do so.
vfdev-5: @yuta0821 I understand the problem and why you need to introduce the `keep_first_lr` argument.
In short, for others: the problem with `create_lr_scheduler_with_warmup` is that either 1) we have an additional transitional lr value between `PiecewiseLinear` and `LRScheduler`:
```
- PiecewiseLinear updates LR
1 0.0
- PiecewiseLinear updates LR
2 0.03
- PiecewiseLinear updates LR
3 0.06
- PiecewiseLinear updates LR
4 0.09
- PiecewiseLinear updates LR
5 0.12
- PiecewiseLinear updates LR <----- additional step to remove
6 0.01
- LRScheduler updates LR
7 0.01
- LRScheduler updates LR
8 0.01
- LRScheduler updates LR
9 0.01
- LRScheduler updates LR
10 0.005
```
or 2) `LRScheduler` modifies LR in a relative way => if `PiecewiseLinear` stops one step before, then `LRScheduler` will work from this value, e.g. `0.12` from the example above. This also gives wrong behavior.
I see here two ways to move forward:
- either we add a more meaningful public arg to `LRScheduler`, e.g. `relative: bool = True` and thus we setup `create_lr_scheduler_with_warmup` with `LRScheduler(..., relative=False)`
- do something like `lr_scheduler.lr_scheduler.last_epoch += 1` inside `create_lr_scheduler_with_warmup`:
```python
lr_scheduler = LRScheduler(lr_scheduler, save_history=save_history)
lr_scheduler.lr_scheduler.last_epoch += 1
```
yuta0821: > @yuta0821 i understand the problem and why you need to introduce keep_first_lr argument.
Thank you for your analysis. Your summary is exactly what I was thinking.
vfdev-5: @yuta0821 I'm trying to figure out how to merge your work into master step by step.
You updated tests/ignite/handlers/test_param_scheduler.py; do you think we could merge it without the updates introduced in ignite/handlers/param_scheduler.py?
If yes, could you please send a PR with only the updated tests? Thanks a lot for your patience.
yuta0821: I don't think we could merge either `tests/ignite/handlers/test_param_scheduler.py` or `tests/ignite/contrib/engines/test_common.py` without the updates introduced in `ignite/handlers/param_scheduler.py`.
This is because the changes in the tests, such as `trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)`, were added to handle the changes made in `ignite/handlers/param_scheduler.py`.
vfdev-5: @yuta0821 I pushed a few commits to get this PR merged. Basically, it is one of your suggestions. Currently, I do not have a better way to fix it. Let's keep it as it is.
Thanks again for starting this work, and sorry for the delay it took to land it. | diff --git a/ignite/contrib/engines/common.py b/ignite/contrib/engines/common.py
index 3b773f11..bac1eaa5 100644
--- a/ignite/contrib/engines/common.py
+++ b/ignite/contrib/engines/common.py
@@ -13,7 +13,6 @@ import ignite.distributed as idist
from ignite.contrib.handlers import (
ClearMLLogger,
global_step_from_engine,
- LRScheduler,
MLflowLogger,
NeptuneLogger,
PolyaxonLogger,
@@ -165,8 +164,6 @@ def _setup_common_training_handlers(
trainer.add_event_handler(
Events.ITERATION_COMPLETED, lambda engine: cast(_LRScheduler, lr_scheduler).step()
)
- elif isinstance(lr_scheduler, LRScheduler):
- trainer.add_event_handler(Events.ITERATION_COMPLETED, lr_scheduler)
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
diff --git a/ignite/handlers/param_scheduler.py b/ignite/handlers/param_scheduler.py
index e88c24a3..b09611c7 100644
--- a/ignite/handlers/param_scheduler.py
+++ b/ignite/handlers/param_scheduler.py
@@ -795,6 +795,7 @@ class LRScheduler(ParamScheduler):
lr_scheduler: lr_scheduler object to wrap.
save_history: whether to log the parameter values to
`engine.state.param_history`, (default=False).
+ use_legacy: if True, scheduler should be attached to ``Events.ITERATION_COMPLETED``, (default=False).
Examples:
@@ -808,20 +809,14 @@ class LRScheduler(ParamScheduler):
from torch.optim.lr_scheduler import StepLR
torch_lr_scheduler = StepLR(default_optimizer, step_size=3, gamma=0.1)
-
scheduler = LRScheduler(torch_lr_scheduler)
+ default_trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
+
@default_trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(default_optimizer.param_groups[0]["lr"])
- # In this example, we assume to have installed PyTorch>=1.1.0
- # (with new `torch.optim.lr_scheduler` behaviour) and
- # we attach scheduler to Events.ITERATION_COMPLETED
- # instead of Events.ITERATION_STARTED to make sure to use
- # the first lr value from the optimizer, otherwise it is will be skipped:
- default_trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
-
default_trainer.run([0] * 8, max_epochs=1)
.. testoutput::
@@ -836,9 +831,17 @@ class LRScheduler(ParamScheduler):
0.001...
.. versionadded:: 0.4.5
+
+ .. versionchanged:: 0.5.0
+ added `use_legacy` argument
"""
- def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False):
+ def __init__(
+ self,
+ lr_scheduler: _LRScheduler,
+ save_history: bool = False,
+ use_legacy: bool = False,
+ ):
if not isinstance(lr_scheduler, _LRScheduler):
raise TypeError(
@@ -852,11 +855,19 @@ class LRScheduler(ParamScheduler):
param_name="lr",
save_history=save_history,
)
+ if use_legacy:
+ warnings.warn(
+ "Please make sure to attach scheduler to Events.ITERATION_COMPLETED "
+ "instead of Events.ITERATION_STARTED to make sure to use "
+ "the first lr value from the optimizer, otherwise it is will be skipped"
+ )
+ self.lr_scheduler.last_epoch += 1 # type: ignore[attr-defined]
+
self._state_attrs += ["lr_scheduler"]
def __call__(self, engine: Optional[Engine], name: Optional[str] = None) -> None:
- self.lr_scheduler.last_epoch += 1 # type: ignore[attr-defined]
super(LRScheduler, self).__call__(engine, name)
+ self.lr_scheduler.last_epoch += 1 # type: ignore[attr-defined]
def get_param(self) -> Union[float, List[float]]:
"""Method to get current optimizer's parameter value"""
@@ -904,9 +915,9 @@ class LRScheduler(ParamScheduler):
values = []
scheduler = cls(save_history=False, lr_scheduler=lr_scheduler, **kwargs)
for i in range(num_events):
+ scheduler(engine=None)
params = [p[scheduler.param_name] for p in scheduler.optimizer_param_groups]
values.append([i] + params)
- scheduler(engine=None)
obj = torch.load(cache_filepath.as_posix())
lr_scheduler.load_state_dict(obj["lr_scheduler"])
@@ -927,8 +938,7 @@ def create_lr_scheduler_with_warmup(
Helper method to create a learning rate scheduler with a linear warm-up.
Args:
- lr_scheduler: learning rate scheduler
- after the warm-up.
+ lr_scheduler: learning rate scheduler after the warm-up.
warmup_start_value: learning rate start value of the warm-up phase.
warmup_duration: warm-up phase duration, number of events.
warmup_end_value: learning rate end value of the warm-up phase, (default=None). If None,
@@ -1011,10 +1021,15 @@ def create_lr_scheduler_with_warmup(
if isinstance(lr_scheduler, _LRScheduler):
init_lr = param_group["lr"]
-
if init_lr != param_group_warmup_end_value:
milestones_values.append((warmup_duration, init_lr))
+ # We need to advance torch lr_scheduler to avoid duplicated lr value
+ # given by PiecewiseLinear and LRScheduler.
+ # We suggest to attach output scheduler on ITERATION_STARTED but
+ # torch lr_scheduler works with ITERATION_COMPLETED
+ # See also https://github.com/pytorch/ignite/pull/2496#issuecomment-1065984440
+ lr_scheduler.last_epoch += 1
lr_scheduler = LRScheduler(lr_scheduler, save_history=save_history)
else:
init_lr = lr_scheduler.get_param()
| Make LRScheduler attachable to Events.ITERATION_STARTED
## 🚀 Feature
Currently, the correct way to use the `LRScheduler` wrapper for PyTorch >= 1.1.0 is the following:
```python
from torch.optim.lr_scheduler import StepLR
torch_lr_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
scheduler = LRScheduler(torch_lr_scheduler)
@trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"])
# In this example, we assume to have installed PyTorch>=1.1.0
# (with new `torch.optim.lr_scheduler` behaviour) and
# we attach scheduler to Events.ITERATION_COMPLETED
# instead of Events.ITERATION_STARTED to make sure to use
# the first lr value from the optimizer, otherwise it is will be skipped:
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.run([0] * 8, max_epochs=1)
```
>
```
0.1
0.1
0.1
0.010
0.010
0.010
0.001
0.001
```
- https://github.com/pytorch/ignite/pull/2463
However, other schedulers should be used as follows ([link](https://pytorch.org/ignite/generated/ignite.handlers.param_scheduler.PiecewiseLinear.html#ignite.handlers.param_scheduler.PiecewiseLinear))
```python
milestones_values = [(1, 1.0), (3, 0.8), (5, 0.2)]
scheduler = PiecewiseLinear(
optimizer, "lr", milestones_values=milestones_values)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"])
trainer.run([0] * 6, max_epochs=1)
```
The idea is to improve `LRScheduler` such that we could attach it to `Events.ITERATION_STARTED` and have a coherent API. It will be a BC-breaking change, but for good.
So, the desired example using `LRScheduler` should be:
```python
from torch.optim.lr_scheduler import StepLR
torch_lr_scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
scheduler = LRScheduler(torch_lr_scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_COMPLETED)
def print_lr():
print(optimizer.param_groups[0]["lr"])
trainer.run([0] * 8, max_epochs=1)
```
Currently, this gives a wrong behaviour as the first 0.1 wasn't consumed by the training step.
```
0.1
0.1
0.010
0.010
0.010
0.001
0.001
```
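Purely as a hedged sketch (an illustration, not a committed API; the class name here is made up), one way to get there could be to defer stepping the wrapped torch scheduler until after its current value has been applied:
```python
from ignite.handlers.param_scheduler import LRScheduler  # module path as in this repo's handlers package

# Rough sketch only: apply the optimizer's current lr first, then advance the
# wrapped torch scheduler, so the initial value is consumed when the handler is
# attached to Events.ITERATION_STARTED.
class LRSchedulerSketch(LRScheduler):
    def __call__(self, engine, name=None):
        super().__call__(engine, name)     # use the current lr value
        self.lr_scheduler.last_epoch += 1  # then step the wrapped torch scheduler
```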
The idea could be to retain the first value and reapply it once and then keep everything as it is now.
| pytorch/ignite | diff --git a/tests/ignite/contrib/engines/test_common.py b/tests/ignite/contrib/engines/test_common.py
index 9e89491a..cadae338 100644
--- a/tests/ignite/contrib/engines/test_common.py
+++ b/tests/ignite/contrib/engines/test_common.py
@@ -128,8 +128,8 @@ def _test_setup_common_training_handlers(
# Check LR scheduling
assert optimizer.param_groups[0]["lr"] <= lr * gamma ** (
- num_iters * num_epochs / step_size
- ), f"{optimizer.param_groups[0]['lr']} vs {lr * gamma ** (num_iters * num_epochs / step_size)}"
+ (num_iters * num_epochs - 1) // step_size
+ ), f"{optimizer.param_groups[0]['lr']} vs {lr * gamma ** ((num_iters * num_epochs - 1) // step_size)}"
def test_asserts_setup_common_training_handlers():
diff --git a/tests/ignite/handlers/test_param_scheduler.py b/tests/ignite/handlers/test_param_scheduler.py
index 77a9a3eb..8edd0225 100644
--- a/tests/ignite/handlers/test_param_scheduler.py
+++ b/tests/ignite/handlers/test_param_scheduler.py
@@ -648,53 +648,71 @@ def test_lr_scheduler(torch_lr_scheduler_cls, kwargs):
tensor = torch.zeros([1], requires_grad=True)
optimizer1 = torch.optim.SGD([tensor], lr=0.01)
optimizer2 = torch.optim.SGD([tensor], lr=0.01)
+ optimizer3 = torch.optim.SGD([tensor], lr=0.01)
opt_state_dict1 = optimizer1.state_dict()
opt_state_dict2 = optimizer2.state_dict()
+ opt_state_dict3 = optimizer3.state_dict()
torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
- scheduler = LRScheduler(torch_lr_scheduler1)
- state_dict1 = scheduler.state_dict()
+ scheduler1 = LRScheduler(torch_lr_scheduler1)
+ state_dict1 = scheduler1.state_dict()
torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
- state_dict2 = torch_lr_scheduler2.state_dict()
+ with pytest.warns(UserWarning, match=r"the first lr value from the optimizer, otherwise it is will be skipped"):
+ scheduler2 = LRScheduler(torch_lr_scheduler2, use_legacy=True)
+ state_dict2 = scheduler2.state_dict()
+
+ torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
+ state_dict3 = torch_lr_scheduler3.state_dict()
def dummy_update(engine, batch):
optimizer1.step()
optimizer2.step()
+ optimizer3.step()
trainer = Engine(dummy_update)
+ trainer.add_event_handler(Events.ITERATION_STARTED, scheduler1)
@trainer.on(Events.ITERATION_STARTED)
- def save_lr(engine):
- lrs.append(optimizer1.param_groups[0]["lr"])
+ def save_lr1(engine):
+ lrs1.append(optimizer1.param_groups[0]["lr"])
+
+ @trainer.on(Events.ITERATION_STARTED)
+ def save_lr2(engine):
+ lrs2.append(optimizer2.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_true_lr(engine):
- lrs_true.append(optimizer2.param_groups[0]["lr"])
+ lrs_true.append(optimizer3.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_COMPLETED)
def torch_lr_scheduler_step(engine):
- torch_lr_scheduler2.step()
+ torch_lr_scheduler3.step()
- trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
+ trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler2)
for _ in range(2):
- lrs = []
+ lrs1 = []
+ lrs2 = []
lrs_true = []
data = [0] * 10
max_epochs = 2
trainer.run(data, max_epochs=max_epochs)
- assert lrs_true == pytest.approx(lrs), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs} ({len(lrs)})"
+ assert lrs_true == pytest.approx(lrs1), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs1} ({len(lrs1)})"
+ assert lrs_true == pytest.approx(lrs2), f"{_}: {lrs_true} ({len(lrs_true)}) vs {lrs2} ({len(lrs2)})"
optimizer1.load_state_dict(opt_state_dict1)
- scheduler.load_state_dict(state_dict1)
+ scheduler1.load_state_dict(state_dict1)
optimizer2.load_state_dict(opt_state_dict2)
- torch_lr_scheduler2.load_state_dict(state_dict2)
+ scheduler2.load_state_dict(state_dict2)
+ optimizer3.load_state_dict(opt_state_dict3)
+ torch_lr_scheduler3.load_state_dict(state_dict3)
- optimizer3 = torch.optim.SGD([tensor], lr=0.01)
- torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
+ optimizer4 = torch.optim.SGD([tensor], lr=0.01)
+ torch_lr_scheduler4 = torch_lr_scheduler_cls(optimizer=optimizer4, **kwargs)
- simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3)
- assert lrs == pytest.approx([v for i, v in simulated_values])
+ simulated_values = LRScheduler.simulate_values(num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler4)
+ assert lrs1 == pytest.approx([v for i, v in simulated_values])
+ assert lrs2 == pytest.approx([v for i, v in simulated_values])
def test_piecewiselinear_asserts():
@@ -813,11 +831,8 @@ def test_simulate_and_plot_values():
def _test(scheduler_cls, **scheduler_kwargs):
- optimizer = None
- event = Events.ITERATION_STARTED
if scheduler_cls == LRScheduler:
optimizer = scheduler_kwargs["lr_scheduler"].optimizer
- event = Events.ITERATION_COMPLETED
elif scheduler_cls == ConcatScheduler:
optimizer = scheduler_kwargs["optimizer"]
del scheduler_kwargs["optimizer"]
@@ -828,7 +843,7 @@ def test_simulate_and_plot_values():
max_epochs = 2
data = [0] * 10
- # simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
+ simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
scheduler = scheduler_cls(**scheduler_kwargs)
@@ -838,15 +853,11 @@ def test_simulate_and_plot_values():
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
- trainer.add_event_handler(event, scheduler)
+ trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)
trainer.run(data, max_epochs=max_epochs)
- # assert lrs == pytest.approx([v for i, v in simulated_values])
-
- if scheduler_cls == LRScheduler or scheduler_cls == ConcatScheduler:
- # As internal state of torch lr scheduler has been changed the following checks will fail
- return
+ assert lrs == pytest.approx([v for i, v in simulated_values])
# reexecute to check if no internal changes
# simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,
@@ -937,7 +948,7 @@ def test_create_lr_scheduler_with_warmup_asserts():
@pytest.mark.parametrize(
- "lrsched_warmup_config",
+ "lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value",
[
# A) opt lr != warmup_end_value
("ExponentialLR", 0.01, 0.05, 10, 0.2),
@@ -955,15 +966,9 @@ def test_create_lr_scheduler_with_warmup_asserts():
("ExponentialLR", 0.01, None, 10, 0.2 * 0.98),
],
)
-def test_create_lr_scheduler_with_warmup(lrsched_warmup_config):
-
- (
- lr_scheduler_name,
- warmup_start_value,
- warmup_end_value,
- warmup_duration,
- warmup_end_next_value,
- ) = lrsched_warmup_config
+def test_create_lr_scheduler_with_warmup(
+ lr_scheduler_name, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value
+):
t1 = torch.zeros([1], requires_grad=True)
@@ -981,6 +986,11 @@ def test_create_lr_scheduler_with_warmup(lrsched_warmup_config):
num_iterations = 10
max_epochs = 20
+ if warmup_end_value is None:
+ expected_warmup_end_value = optimizer.param_groups[0]["lr"]
+ else:
+ expected_warmup_end_value = warmup_end_value
+
simulated_values = [None] * (num_iterations * max_epochs)
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
@@ -989,8 +999,6 @@ def test_create_lr_scheduler_with_warmup(lrsched_warmup_config):
warmup_duration=warmup_duration,
output_simulated_values=simulated_values,
)
- if warmup_end_value is None:
- warmup_end_value = optimizer.param_groups[0]["lr"]
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@@ -1007,11 +1015,11 @@ def test_create_lr_scheduler_with_warmup(lrsched_warmup_config):
lrs = []
trainer.run(data, max_epochs=max_epochs)
- assert lrs == pytest.approx([v for i, v in simulated_values])
+ assert lrs == pytest.approx([v for _, v in simulated_values])
assert lrs[0] == pytest.approx(warmup_start_value), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration - 1] == pytest.approx(
- warmup_end_value
+ expected_warmup_end_value
), f"lrs={lrs[: warmup_duration + num_iterations]}"
assert lrs[warmup_duration] == pytest.approx(
warmup_end_next_value
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | absl-py==2.2.1
alembic==1.15.2
annotated-types==0.7.0
arrow==1.3.0
attrs==25.3.0
boto3==1.37.23
botocore==1.37.23
bravado==11.1.0
bravado-core==6.1.1
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
clearml==1.18.0
click==7.1.2
clipped==0.10.1
cloudpickle==2.2.1
codecov==2.1.13
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
databricks-cli==0.18.0
dill==0.3.9
docker==6.1.3
docker-pycreds==0.4.0
durationpy==0.9
entrypoints==0.4
eval_type_backport==0.2.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
Flask==1.1.4
fonttools==4.56.0
fqdn==1.5.1
fsspec==2025.3.1
furl==2.1.4
future==1.0.0
gitdb==4.0.12
GitPython==3.1.44
google-auth==2.38.0
greenlet==3.1.1
grpcio==1.71.0
gunicorn==20.1.0
gym==0.26.2
gym-notices==0.0.8
hestia==0.6.0
hypertune==1.2.0
idna==3.10
imageio==2.37.0
importlib-metadata==5.2.0
importlib_resources==6.5.2
iniconfig==2.1.0
intel-cmplr-lib-ur==2025.1.0
intel-openmp==2025.1.0
isoduration==20.11.0
itsdangerous==1.1.0
Jinja2==2.10.3
jmespath==1.0.1
joblib==1.4.2
jsonpatch==1.33
jsonpointer==3.0.0
jsonref==1.1.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
kiwisolver==1.4.7
kubernetes==32.0.1
lazy_loader==0.4
Mako==1.3.9
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==2.0.1
marshmallow==3.0.0rc5
matplotlib==3.9.4
mdurl==0.1.2
mkl==2025.1.0
mlflow==1.30.1
monotonic==1.6
mpmath==1.3.0
msgpack==1.1.0
neptune-client==1.13.0
networkx==3.2.1
nltk==3.9.1
numpy==1.26.4
nvidia-cublas-cu11==11.10.3.66
nvidia-cublas-cu12==12.4.5.8
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cuda-nvrtc-cu11==11.7.99
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-runtime-cu11==11.7.99
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cudnn-cu11==8.5.0.96
nvidia-cudnn-cu12==9.1.0.70
nvidia-cufft-cu12==11.2.1.3
nvidia-curand-cu12==10.3.5.147
nvidia-cusolver-cu12==11.6.1.9
nvidia-cusparse-cu12==12.3.1.170
nvidia-cusparselt-cu12==0.6.2
nvidia-ml-py==12.570.86
nvidia-nccl-cu12==2.21.5
nvidia-nvjitlink-cu12==12.4.127
nvidia-nvtx-cu12==12.4.127
oauthlib==3.2.2
orderedmultidict==1.0.1
orjson==3.10.16
packaging==21.3
pandas==1.5.3
pathlib2==2.3.7.post1
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
polyaxon==2.7.0
polyaxon-client==0.6.1
polyaxon-schemas==0.6.1
polystores==0.2.5
prometheus_client==0.21.1
prometheus_flask_exporter==0.23.2
protobuf==4.25.6
psutil==7.0.0
py-rouge==1.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pydantic==2.11.1
pydantic_core==2.33.0
pygame==2.6.1
Pygments==2.19.1
PyJWT==2.9.0
pynvml==12.0.0
pyparsing==3.2.3
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytorch-fid==0.1.1
-e git+https://github.com/pytorch/ignite.git@727150e82fc3f5478717a6d0ea08c80db29b0e10#egg=pytorch_ignite
pytz==2022.7.1
PyYAML==6.0.2
querystring-parser==1.2.4
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
rfc3339-validator==0.1.4
rfc3986-validator==0.1.1
rhea==0.5.5
rich==14.0.0
rpds-py==0.24.0
rsa==4.9
s3transfer==0.11.4
scikit-image==0.24.0
scikit-learn==1.6.1
scipy==1.13.1
sentry-sdk==2.5.1
setproctitle==1.3.5
simplejson==3.20.1
six==1.17.0
smmap==5.0.2
SQLAlchemy==1.4.54
sqlparse==0.5.3
swagger-spec-validator==3.0.4
sympy==1.13.1
tabulate==0.9.0
tbb==2022.1.0
tcmlib==1.3.0
tensorboard==2.19.0
tensorboard-data-server==0.7.2
tensorboardX==2.6.2.2
threadpoolctl==3.6.0
tifffile==2024.8.30
tomli==2.2.1
torch==1.13.1
torchvision==0.21.0
tornado==6.4.2
tqdm==4.67.1
traceml==1.2.0
triton==3.2.0
types-python-dateutil==2.9.0.20241206
typing-inspection==0.4.0
typing_extensions==4.13.0
umf==0.10.0
uri-template==1.3.0
urllib3==1.26.20
vents==0.6.2
visdom==0.2.4
wandb==0.19.8
webcolors==24.11.1
websocket-client==1.8.0
Werkzeug==1.0.1
zipp==3.21.0
| name: ignite
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- absl-py==2.2.1
- alembic==1.15.2
- annotated-types==0.7.0
- arrow==1.3.0
- attrs==25.3.0
- boto3==1.37.23
- botocore==1.37.23
- bravado==11.1.0
- bravado-core==6.1.1
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- clearml==1.18.0
- click==7.1.2
- clipped==0.10.1
- cloudpickle==2.2.1
- codecov==2.1.13
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- databricks-cli==0.18.0
- dill==0.3.9
- docker==6.1.3
- docker-pycreds==0.4.0
- durationpy==0.9
- entrypoints==0.4
- eval-type-backport==0.2.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flask==1.1.4
- fonttools==4.56.0
- fqdn==1.5.1
- fsspec==2025.3.1
- furl==2.1.4
- future==1.0.0
- gitdb==4.0.12
- gitpython==3.1.44
- google-auth==2.38.0
- greenlet==3.1.1
- grpcio==1.71.0
- gunicorn==20.1.0
- gym==0.26.2
- gym-notices==0.0.8
- hestia==0.6.0
- hypertune==1.2.0
- idna==3.10
- imageio==2.37.0
- importlib-metadata==5.2.0
- importlib-resources==6.5.2
- iniconfig==2.1.0
- intel-cmplr-lib-ur==2025.1.0
- intel-openmp==2025.1.0
- isoduration==20.11.0
- itsdangerous==1.1.0
- jinja2==2.10.3
- jmespath==1.0.1
- joblib==1.4.2
- jsonpatch==1.33
- jsonpointer==3.0.0
- jsonref==1.1.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- kiwisolver==1.4.7
- kubernetes==32.0.1
- lazy-loader==0.4
- mako==1.3.9
- markdown==3.7
- markdown-it-py==3.0.0
- markupsafe==2.0.1
- marshmallow==3.0.0rc5
- matplotlib==3.9.4
- mdurl==0.1.2
- mkl==2025.1.0
- mlflow==1.30.1
- monotonic==1.6
- mpmath==1.3.0
- msgpack==1.1.0
- neptune-client==1.13.0
- networkx==3.2.1
- nltk==3.9.1
- numpy==1.26.4
- nvidia-cublas-cu11==11.10.3.66
- nvidia-cublas-cu12==12.4.5.8
- nvidia-cuda-cupti-cu12==12.4.127
- nvidia-cuda-nvrtc-cu11==11.7.99
- nvidia-cuda-nvrtc-cu12==12.4.127
- nvidia-cuda-runtime-cu11==11.7.99
- nvidia-cuda-runtime-cu12==12.4.127
- nvidia-cudnn-cu11==8.5.0.96
- nvidia-cudnn-cu12==9.1.0.70
- nvidia-cufft-cu12==11.2.1.3
- nvidia-curand-cu12==10.3.5.147
- nvidia-cusolver-cu12==11.6.1.9
- nvidia-cusparse-cu12==12.3.1.170
- nvidia-cusparselt-cu12==0.6.2
- nvidia-ml-py==12.570.86
- nvidia-nccl-cu12==2.21.5
- nvidia-nvjitlink-cu12==12.4.127
- nvidia-nvtx-cu12==12.4.127
- oauthlib==3.2.2
- orderedmultidict==1.0.1
- orjson==3.10.16
- packaging==21.3
- pandas==1.5.3
- pathlib2==2.3.7.post1
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- polyaxon==2.7.0
- polyaxon-client==0.6.1
- polyaxon-schemas==0.6.1
- polystores==0.2.5
- prometheus-client==0.21.1
- prometheus-flask-exporter==0.23.2
- protobuf==4.25.6
- psutil==7.0.0
- py-rouge==1.1
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pydantic==2.11.1
- pydantic-core==2.33.0
- pygame==2.6.1
- pygments==2.19.1
- pyjwt==2.9.0
- pynvml==12.0.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytorch-fid==0.1.1
- pytorch-ignite==0.5.0
- pytz==2022.7.1
- pyyaml==6.0.2
- querystring-parser==1.2.4
- referencing==0.36.2
- regex==2024.11.6
- requests==2.32.3
- requests-oauthlib==2.0.0
- requests-toolbelt==1.0.0
- rfc3339-validator==0.1.4
- rfc3986-validator==0.1.1
- rhea==0.5.5
- rich==14.0.0
- rpds-py==0.24.0
- rsa==4.9
- s3transfer==0.11.4
- scikit-image==0.24.0
- scikit-learn==1.6.1
- scipy==1.13.1
- sentry-sdk==2.5.1
- setproctitle==1.3.5
- setuptools==59.5.0
- simplejson==3.20.1
- six==1.17.0
- smmap==5.0.2
- sqlalchemy==1.4.54
- sqlparse==0.5.3
- swagger-spec-validator==3.0.4
- sympy==1.13.1
- tabulate==0.9.0
- tbb==2022.1.0
- tcmlib==1.3.0
- tensorboard==2.19.0
- tensorboard-data-server==0.7.2
- tensorboardx==2.6.2.2
- threadpoolctl==3.6.0
- tifffile==2024.8.30
- tomli==2.2.1
- torch==1.13.1
- torchvision==0.21.0
- tornado==6.4.2
- tqdm==4.67.1
- traceml==1.2.0
- triton==3.2.0
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- umf==0.10.0
- uri-template==1.3.0
- urllib3==1.26.20
- vents==0.6.2
- visdom==0.2.4
- wandb==0.19.8
- webcolors==24.11.1
- websocket-client==1.8.0
- werkzeug==1.0.1
- zipp==3.21.0
prefix: /opt/conda/envs/ignite
| [
"tests/ignite/handlers/test_param_scheduler.py::test_lr_scheduler[StepLR-kwargs0]",
"tests/ignite/handlers/test_param_scheduler.py::test_lr_scheduler[ExponentialLR-kwargs1]",
"tests/ignite/handlers/test_param_scheduler.py::test_lr_scheduler[MultiplicativeLR-kwargs2]",
"tests/ignite/handlers/test_param_scheduler.py::test_simulate_and_plot_values"
] | [
"tests/ignite/contrib/engines/test_common.py::test_asserts_setup_common_training_handlers",
"tests/ignite/contrib/engines/test_common.py::test_setup_neptune_logging"
] | [
"tests/ignite/contrib/engines/test_common.py::test_no_warning_with_train_sampler",
"tests/ignite/contrib/engines/test_common.py::test_setup_common_training_handlers",
"tests/ignite/contrib/engines/test_common.py::test_setup_common_training_handlers_using_save_handler",
"tests/ignite/contrib/engines/test_common.py::test_save_best_model_by_val_score",
"tests/ignite/contrib/engines/test_common.py::test_gen_save_best_models_by_val_score",
"tests/ignite/contrib/engines/test_common.py::test_add_early_stopping_by_val_score",
"tests/ignite/contrib/engines/test_common.py::test_deprecated_setup_any_logging",
"tests/ignite/contrib/engines/test_common.py::test__setup_logging_wrong_args",
"tests/ignite/contrib/engines/test_common.py::test_setup_tb_logging",
"tests/ignite/contrib/engines/test_common.py::test_setup_visdom_logging",
"tests/ignite/contrib/engines/test_common.py::test_setup_plx_logging",
"tests/ignite/contrib/engines/test_common.py::test_setup_mlflow_logging",
"tests/ignite/contrib/engines/test_common.py::test_setup_wandb_logging",
"tests/ignite/contrib/engines/test_common.py::test_setup_clearml_logging",
"tests/ignite/contrib/engines/test_common.py::test_distrib_gloo_cpu_or_gpu",
"tests/ignite/handlers/test_param_scheduler.py::test_param_scheduler_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_linear_scheduler",
"tests/ignite/handlers/test_param_scheduler.py::test_linear_scheduler_cycle_size_two",
"tests/ignite/handlers/test_param_scheduler.py::test_cosine_annealing_scheduler",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_state_dict",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_two_schedulers[False]",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_two_schedulers[True]",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_two_linear",
"tests/ignite/handlers/test_param_scheduler.py::test_concat_scheduler_3_schedulers",
"tests/ignite/handlers/test_param_scheduler.py::test_save_param_history",
"tests/ignite/handlers/test_param_scheduler.py::test_lr_scheduler_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_piecewiselinear_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_piecewiselinear[True]",
"tests/ignite/handlers/test_param_scheduler.py::test_piecewiselinear[False]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[ExponentialLR-0.01-0.05-10-0.2]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[ExponentialLR-0.01-0.05-2-0.2]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[ExponentialLR-0.01-0.2-10-0.196]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[ExponentialLR-0.01-0.2-2-0.196]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[LinearCyclicalScheduler-0.01-0.05-10-0.8]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[LinearCyclicalScheduler-0.01-0.05-2-0.8]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[LinearCyclicalScheduler-0.01-0.8-10-0.64]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[LinearCyclicalScheduler-0.01-0.8-2-0.64]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup[ExponentialLR-0.01-None-10-0.196]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup_on_combined_scheduler[False]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup_on_combined_scheduler[True]",
"tests/ignite/handlers/test_param_scheduler.py::test_create_lr_scheduler_with_warmup_with_real_model",
"tests/ignite/handlers/test_param_scheduler.py::test_param_group_scheduler_asserts",
"tests/ignite/handlers/test_param_scheduler.py::test_param_group_scheduler",
"tests/ignite/handlers/test_param_scheduler.py::test_scheduler_with_param_groups",
"tests/ignite/handlers/test_param_scheduler.py::test_lr_scheduling_on_non_torch_optimizers",
"tests/ignite/handlers/test_param_scheduler.py::test_reduce_lr_on_plateau_scheduler",
"tests/ignite/handlers/test_param_scheduler.py::test_reduce_lr_on_plateau_scheduler_asserts"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,310 | 1,449 | [
"ignite/contrib/engines/common.py",
"ignite/handlers/param_scheduler.py"
] |
horejsek__python-fastjsonschema-144 | d63d682e003aa89a5cf6fc0b8cf6f477c34931ba | 2022-02-27 23:11:24 | d63d682e003aa89a5cf6fc0b8cf6f477c34931ba | diff --git a/fastjsonschema/draft04.py b/fastjsonschema/draft04.py
index 7dd097e..8c25863 100644
--- a/fastjsonschema/draft04.py
+++ b/fastjsonschema/draft04.py
@@ -413,19 +413,23 @@ class CodeGeneratorDraft04(CodeGenerator):
self.exc('{name} must contain only specified items', rule='items')
else:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)):
- self.generate_func_code_block(
+ count = self.generate_func_code_block(
self._definition['additionalItems'],
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
)
+ if count == 0:
+ self.l('pass')
else:
if items_definition:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'):
- self.generate_func_code_block(
+ count = self.generate_func_code_block(
items_definition,
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
)
+ if count == 0:
+ self.l('pass')
def generate_min_properties(self):
self.create_variable_is_dict()
diff --git a/fastjsonschema/generator.py b/fastjsonschema/generator.py
index 5e08030..c5b57aa 100644
--- a/fastjsonschema/generator.py
+++ b/fastjsonschema/generator.py
@@ -143,6 +143,8 @@ class CodeGenerator:
def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False):
"""
Creates validation rules for current definition.
+
+ Returns the number of validation rules generated as code.
"""
backup = self._definition, self._variable, self._variable_name
self._definition, self._variable, self._variable_name = definition, variable, variable_name
@@ -150,25 +152,31 @@ class CodeGenerator:
backup_variables = self._variables
self._variables = set()
- self._generate_func_code_block(definition)
+ count = self._generate_func_code_block(definition)
self._definition, self._variable, self._variable_name = backup
if clear_variables:
self._variables = backup_variables
+ return count
+
def _generate_func_code_block(self, definition):
if not isinstance(definition, dict):
raise JsonSchemaDefinitionException("definition must be an object")
if '$ref' in definition:
# needed because ref overrides any sibling keywords
- self.generate_ref()
+ return self.generate_ref()
else:
- self.run_generate_functions(definition)
+ return self.run_generate_functions(definition)
def run_generate_functions(self, definition):
+ """Returns the number of generate functions that were executed."""
+ count = 0
for key, func in self._json_keywords_to_function.items():
if key in definition:
func()
+ count += 1
+ return count
def generate_ref(self):
"""
| Misleading IndentationError with a faulty schema
The following (faulty) schema
```
{
"type": "object",
"properties": {
"a": {
"type": "array",
"items": {
"b": {
"type": "string"
}
}
}
}
}
```
raises a misleading `IndentationError: expected an indented block`. This is especially confusing, as jsonschema does not raise an error. | horejsek/python-fastjsonschema | diff --git a/tests/test_array.py b/tests/test_array.py
index 99c9254..3da479f 100644
--- a/tests/test_array.py
+++ b/tests/test_array.py
@@ -180,3 +180,22 @@ def test_mixed_arrays(asserter, value, expected):
},
}, value, expected)
+
+def test_issue_114(asserter):
+ """Prevent the faulty scheme to generate an empty for-loop."""
+ schema = {
+ "type": "object",
+ "properties": {
+ "a": {
+ "type": "array",
+ "items": {
+ "b": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ value = {"a": []}
+ expected = value
+ asserter(schema, value, expected)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | 2.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[devel]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"git submodule init",
"git submodule update"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
colorama==0.4.5
dill==0.3.4
execnet==1.9.0
-e git+https://github.com/horejsek/python-fastjsonschema.git@d63d682e003aa89a5cf6fc0b8cf6f477c34931ba#egg=fastjsonschema
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
json-spec==0.10.1
jsonschema==3.2.0
lazy-object-proxy==1.7.1
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==9.0.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyrsistent==0.18.0
pytest==6.2.4
pytest-benchmark==3.4.1
pytest-cache==1.0
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
validictory==1.1.3
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: python-fastjsonschema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- colorama==0.4.5
- dill==0.3.4
- execnet==1.9.0
- isort==5.10.1
- json-spec==0.10.1
- jsonschema==3.2.0
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- platformdirs==2.4.0
- py-cpuinfo==9.0.0
- pylint==2.13.9
- pyrsistent==0.18.0
- pytest-benchmark==3.4.1
- pytest-cache==1.0
- six==1.17.0
- tomli==1.2.3
- typed-ast==1.5.5
- validictory==1.1.3
- wrapt==1.16.0
prefix: /opt/conda/envs/python-fastjsonschema
| [
"tests/test_array.py::test_issue_114"
] | [] | [
"tests/test_array.py::test_array[0-expected0]",
"tests/test_array.py::test_array[None-expected1]",
"tests/test_array.py::test_array[True-expected2]",
"tests/test_array.py::test_array[False-expected3]",
"tests/test_array.py::test_array[abc-expected4]",
"tests/test_array.py::test_array[value5-expected5]",
"tests/test_array.py::test_array[value6-expected6]",
"tests/test_array.py::test_array[value7-expected7]",
"tests/test_array.py::test_max_items[value0-expected0]",
"tests/test_array.py::test_max_items[value1-expected1]",
"tests/test_array.py::test_max_items[value2-expected2]",
"tests/test_array.py::test_max_items[value3-expected3]",
"tests/test_array.py::test_min_items[value0-expected0]",
"tests/test_array.py::test_min_items[value1-expected1]",
"tests/test_array.py::test_min_items[value2-expected2]",
"tests/test_array.py::test_min_items[value3-expected3]",
"tests/test_array.py::test_unique_items[value0-expected0]",
"tests/test_array.py::test_unique_items[value1-expected1]",
"tests/test_array.py::test_unique_items[value2-expected2]",
"tests/test_array.py::test_unique_items[value3-expected3]",
"tests/test_array.py::test_unique_items[value4-expected4]",
"tests/test_array.py::test_unique_items[value5-expected5]",
"tests/test_array.py::test_unique_items[value6-expected6]",
"tests/test_array.py::test_unique_items[value7-expected7]",
"tests/test_array.py::test_unique_items[value8-expected8]",
"tests/test_array.py::test_unique_items[value9-expected9]",
"tests/test_array.py::test_unique_items[value10-expected10]",
"tests/test_array.py::test_unique_items[value11-expected11]",
"tests/test_array.py::test_unique_items[value12-expected12]",
"tests/test_array.py::test_unique_items[value13-expected13]",
"tests/test_array.py::test_unique_items[value14-expected14]",
"tests/test_array.py::test_unique_items[value15-expected15]",
"tests/test_array.py::test_unique_items[value16-expected16]",
"tests/test_array.py::test_unique_items[value17-expected17]",
"tests/test_array.py::test_min_and_unique_items",
"tests/test_array.py::test_items_all_same[value0-expected0]",
"tests/test_array.py::test_items_all_same[value1-expected1]",
"tests/test_array.py::test_items_all_same[value2-expected2]",
"tests/test_array.py::test_different_items[value0-expected0]",
"tests/test_array.py::test_different_items[value1-expected1]",
"tests/test_array.py::test_different_items[value2-expected2]",
"tests/test_array.py::test_different_items[value3-expected3]",
"tests/test_array.py::test_different_items[value4-expected4]",
"tests/test_array.py::test_different_items[value5-expected5]",
"tests/test_array.py::test_different_items_with_additional_items[value0-expected0]",
"tests/test_array.py::test_different_items_with_additional_items[value1-expected1]",
"tests/test_array.py::test_different_items_with_additional_items[value2-expected2]",
"tests/test_array.py::test_different_items_with_additional_items[value3-expected3]",
"tests/test_array.py::test_different_items_with_additional_items[value4-expected4]",
"tests/test_array.py::test_different_items_with_additional_items[value5-expected5]",
"tests/test_array.py::test_different_items_without_additional_items[value0-expected0]",
"tests/test_array.py::test_different_items_without_additional_items[value1-expected1]",
"tests/test_array.py::test_different_items_without_additional_items[value2-expected2]",
"tests/test_array.py::test_different_items_without_additional_items[value3-expected3]",
"tests/test_array.py::test_different_items_without_additional_items[value4-expected4]",
"tests/test_array.py::test_different_items_without_additional_items[value5-expected5]",
"tests/test_array.py::test_tuples_as_arrays[value0-expected0]",
"tests/test_array.py::test_tuples_as_arrays[value1-expected1]",
"tests/test_array.py::test_tuples_as_arrays[value2-expected2]",
"tests/test_array.py::test_tuples_as_arrays[value3-expected3]",
"tests/test_array.py::test_mixed_arrays[value0-expected0]",
"tests/test_array.py::test_mixed_arrays[value1-expected1]"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,311 | 742 | [
"fastjsonschema/draft04.py",
"fastjsonschema/generator.py"
] |
|
mathandy__svgpathtools-170 | c84c897bf2121ed86ceed45b4e027785351c2fd5 | 2022-02-27 23:49:11 | c84c897bf2121ed86ceed45b4e027785351c2fd5 | mathandy: Looks good. Thanks @[chanicpanic](https://github.com/chanicpanic)! | diff --git a/svgpathtools/document.py b/svgpathtools/document.py
index f88f5ba..1dd9077 100644
--- a/svgpathtools/document.py
+++ b/svgpathtools/document.py
@@ -289,7 +289,7 @@ class Document:
# If given a list of strings (one or more), assume it represents
# a sequence of nested group names
- elif all(isinstance(elem, str) for elem in group):
+ elif len(group) > 0 and all(isinstance(elem, str) for elem in group):
group = self.get_or_add_group(group)
elif not isinstance(group, Element):
| Document does not add path to empty group
When the passed group element has no children, `Document.add_path` incorrectly adds the path to the root element.
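A plausible root cause, inferred from the one-line fix in `document.py`: iterating an empty SVG group element yields no children, so the `all(...)` check meant to detect a list of group-name strings passes vacuously and the empty group is handled as a sequence of nested group names rather than as the target group. A minimal illustration of the vacuous check (standard library only):
```python
# Illustration: an empty ElementTree element iterates over zero children,
# so all() over it is vacuously True even though it is not a list of strings.
from xml.etree.ElementTree import Element

empty_group = Element("g")
print(all(isinstance(elem, str) for elem in empty_group))  # prints: True
```
The patch guards that branch with `len(group) > 0`, so an empty group element no longer takes it.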
Example:
```python
doc = Document()
empty = doc.add_group(group_attribs={"id": "empty"})
doc.add_path("M 0,0 L 1,1", group=empty)
print(doc)
```
Output:
```svg
<svg xmlns:svg="http://www.w3.org/2000/svg"><svg:g id="empty" /><path d="M 0,0 L 1,1" /></svg>
```
Expected Output:
```svg
<svg xmlns:svg="http://www.w3.org/2000/svg"><svg:g id="empty"><path d="M 0,0 L 1,1" /></svg:g></svg>
``` | mathandy/svgpathtools | diff --git a/test/test_groups.py b/test/test_groups.py
index 44b6cb9..aeb3393 100644
--- a/test/test_groups.py
+++ b/test/test_groups.py
@@ -235,4 +235,11 @@ class TestGroups(unittest.TestCase):
path = parse_path(path_d)
svg_path = doc.add_path(path, group=new_leaf)
- self.assertEqual(path_d, svg_path.get('d'))
\ No newline at end of file
+ self.assertEqual(path_d, svg_path.get('d'))
+
+ # Test that paths are added to the correct group
+ new_sibling = doc.get_or_add_group(
+ ['base_group', 'new_parent', 'new_sibling'])
+ doc.add_path(path, group=new_sibling)
+ self.assertEqual(len(new_sibling), 1)
+ self.assertEqual(path_d, new_sibling[0].get('d'))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
scipy==1.13.1
-e git+https://github.com/mathandy/svgpathtools.git@c84c897bf2121ed86ceed45b4e027785351c2fd5#egg=svgpathtools
svgwrite==1.4.3
tomli==2.2.1
| name: svgpathtools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- scipy==1.13.1
- svgwrite==1.4.3
- tomli==2.2.1
prefix: /opt/conda/envs/svgpathtools
| [
"test/test_groups.py::TestGroups::test_add_group"
] | [] | [
"test/test_groups.py::TestGroups::test_group_flatten",
"test/test_groups.py::TestGroups::test_nested_group"
] | [] | MIT License | 12,312 | 156 | [
"svgpathtools/document.py"
] |
just-work__fffw-136 | 19f969ee55c1ed08a228af38eab116db2d9aea81 | 2022-02-28 10:22:38 | 19f969ee55c1ed08a228af38eab116db2d9aea81 | diff --git a/fffw/encoding/codecs.py b/fffw/encoding/codecs.py
index cfe68f9..4857c38 100644
--- a/fffw/encoding/codecs.py
+++ b/fffw/encoding/codecs.py
@@ -82,6 +82,53 @@ class Copy(outputs.Codec):
:returns: edge pointing to an input stream
"""
+ # Running parent method for side effects like stream validation, like if
+ # Copy is compatible with filter graph.
+ super().connect_edge(edge)
+
+ # Ensure that between source stream and copy codec there is no
+ # processing filters. Only split filter is allowed.
+ src = self._validate_filter_chain(edge)
+
+ if edge.input is src:
+ # Copy codec is being connected directly to Source, no more actions
+ # are needed.
+ return edge
+
+ # There are some Splits between Source and Copy. Current edge is not
+ # needed anymore because new Edge will be added directly to the Source.
+ # Recursively removing it from Splits chain.
+ self._remove_edge(edge)
+ # Connecting Copy to a Source directly using new node.
+ src.connect_dest(self)
+ return self.edge
+
+ def _remove_edge(self, edge: base.Edge) -> None:
+ """
+ Remove edge mentions from graph and current instance like it never
+ existed.
+
+ This method is used for reconnecting Copy codec from an end of Split
+ filter chain directly to Source.
+ """
+ # Remove and edge from existing Split filter chain
+ split = edge.input
+ if not isinstance(split, filters.Split): # pragma: no cover
+ # Method is only called from connect_edge() in case of split
+ # filter presence.
+ raise TypeError("Can't disconnect and edge from real filter")
+ split.disconnect(edge)
+ # As the Edge is thrown away, forgot about it.
+ self._edge = None
+
+ @staticmethod
+ def _validate_filter_chain(edge: base.Edge) -> base.Source:
+ """
+ Ensures that Copy codec is being connected to a filter chain that
+ contains only Split filters.
+
+ :returns: Source stream passed to Copy codec.
+ """
src = edge.input
# Ensure that edge is connected to a source with only split filters
# in between.
@@ -89,8 +136,4 @@ class Copy(outputs.Codec):
src = src.input.input
if not isinstance(src, base.Source):
raise ValueError('copy codec can be connected only to source')
- src = edge.input
- if isinstance(src, filters.Split):
- # Remove current edge from filter graph
- edge = src.disconnect(edge)
- return super().connect_edge(edge)
+ return src
diff --git a/fffw/graph/base.py b/fffw/graph/base.py
index fe5b40d..6881fde 100644
--- a/fffw/graph/base.py
+++ b/fffw/graph/base.py
@@ -93,9 +93,11 @@ class Dest(Traversable):
:return Edge: connected edge
"""
if not isinstance(edge, Edge):
- raise ValueError("Only edge allowed")
+ raise TypeError("Only edge allowed")
if self._edge is not None:
- raise RuntimeError("Dest is already connected to %s" % self._edge)
+ raise RuntimeError("Dest input edge is already connected")
+ if edge.output is not self:
+ raise ValueError("Edge output is connected to another dest")
self._edge = edge
return edge
@@ -392,7 +394,9 @@ class Node(Traversable, abc.ABC):
:returns: connected edge
"""
if not isinstance(edge, Edge):
- raise ValueError("only edge allowed")
+ raise TypeError("only edge allowed")
+ if edge.output is not self:
+ raise ValueError("Edge output is connected to another node")
self.inputs[self.inputs.index(None)] = edge
return edge
| Copy codec bug
```
source - split(1) - split(3) - Copy x 3
```
In this case the Copy codec ends up connected to split(1) instead of the source, which then leads to errors.
```
src = edge.input
# Ensure that edge is connected to a source with only split filters
# in between.
source_edge = edge
while isinstance(src, filters.Split):
source_edge = src.input
src = source_edge.input
if not isinstance(src, base.Source):
raise ValueError('copy codec can be connected only to source')
src = edge.input
if isinstance(src, filters.Split):
# Remove current edge from filter graph
src.disconnect(edge)
return super().connect_edge(source_edge)
```
Source edge must be used instead of current one. | just-work/fffw | diff --git a/tests/test_graph.py b/tests/test_graph.py
index c6edf1b..983a7e2 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -7,6 +7,7 @@ from fffw.encoding import inputs, outputs, codecs
from fffw.encoding.complex import FilterComplex
from fffw.encoding.filters import *
from fffw.graph import *
+from fffw.graph import base
from fffw.wrapper import param
@@ -35,6 +36,71 @@ class FdkAAC(codecs.AudioCodec):
return replace(ensure_audio(*metadata), bitrate=self.bitrate)
+class SourceImpl(base.Source):
+
+ @property
+ def name(self) -> str: # pragma: no cover
+ return ''
+
+
+class NodeImpl(base.Node):
+
+ @property
+ def args(self) -> str: # pragma: no cover
+ return ''
+
+
+class DestImpl(base.Dest):
+ pass
+
+
+class GraphBaseTestCase(TestCase):
+ def setUp(self) -> None:
+ super().setUp()
+ self.source = SourceImpl(VIDEO)
+ self.node = NodeImpl()
+ self.another = NodeImpl()
+ self.dest = DestImpl()
+ self.source_edge = base.Edge(self.source, self.node)
+ self.inter_edge = base.Edge(self.node, self.another)
+ self.dest_edge = base.Edge(self.another, self.dest)
+
+ def test_node_connect_edge_validation(self):
+ """
+ Checks edge validation for Node.
+ """
+
+ with self.subTest("only edge allowed"):
+ with self.assertRaises(TypeError):
+ self.node.connect_edge(object()) # type: ignore
+
+ with self.subTest("edge output cross-link"):
+ with self.assertRaises(ValueError):
+ self.node.connect_edge(self.dest_edge)
+
+ with self.subTest("success"):
+ self.node.connect_edge(self.source_edge)
+
+ def test_dest_connect_edge_validation(self):
+ """
+ Checks edge validation for Dest.
+ """
+ with self.subTest("only edge allowed"):
+ with self.assertRaises(TypeError):
+ self.dest.connect_edge(object()) # type: ignore
+
+ with self.subTest("edge output cross-link"):
+ with self.assertRaises(ValueError):
+ self.dest.connect_edge(self.source_edge)
+
+ with self.subTest("success"):
+ self.dest.connect_edge(self.dest_edge)
+
+ with self.subTest("slot is busy"):
+ with self.assertRaises(RuntimeError):
+ self.dest.connect_edge(self.dest_edge)
+
+
class FilterGraphBaseTestCase(TestCase):
def setUp(self) -> None:
@@ -322,9 +388,9 @@ class FilterGraphTestCase(FilterGraphBaseTestCase):
vs2 | c
vs3 | c
expected = (
- deepcopy(vs1.meta.scenes) +
- deepcopy(vs2.meta.scenes) +
- deepcopy(vs3.meta.scenes)
+ deepcopy(vs1.meta.scenes) +
+ deepcopy(vs2.meta.scenes) +
+ deepcopy(vs3.meta.scenes)
)
assert len(expected) == 3
current_duration = TS(0)
@@ -523,12 +589,41 @@ class CopyCodecTestCase(FilterGraphBaseTestCase):
s1 = split
s2 = split | Scale(1920, 1080)
- s1 > codecs.Copy(kind=VIDEO)
+ copy = s1 > codecs.Copy(kind=VIDEO)
# one output left
self.assertListEqual(split.outputs, [s2.input])
# split is disabled because of single output
self.assertFalse(split.enabled)
+ # copy codec is connected to source
+ self.assertIs(copy.edge.input, self.source.video)
+
+ def test_split_disconnect_transient(self):
+ """
+ With multiple splits, copy codec is being disconnected from all of them.
+ """
+ video = self.source.video
+ inter = video | Split(VIDEO, output_count=1)
+ split = inter | Split(VIDEO, output_count=2)
+ s1 = split
+ s2 = split | Scale(1920, 1080)
+
+ copy = s1 > codecs.Copy(kind=VIDEO)
+
+ # one output left
+ self.assertListEqual(split.outputs, [s2.input])
+ # split is disabled because of single output
+ self.assertFalse(split.enabled)
+
+ # intermediate split is still connected to another split
+ self.assertIs(inter.output.output, split)
+ # copy codec is connected to source
+ self.assertIs(copy.edge.input, video)
+ # source is still connected to split
+ edges = video._outputs
+ expected = [copy.edge, inter.input]
+ self.assertEqual(len(edges), 2)
+ self.assertSetEqual(set(edges), set(expected))
def test_split_disconnect_on_single_output(self):
"""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 3.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y ffmpeg mediainfo"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/just-work/fffw.git@19f969ee55c1ed08a228af38eab116db2d9aea81#egg=fffw
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pymediainfo==5.1.0
pytest==8.3.5
tomli==2.2.1
| name: fffw
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pymediainfo==5.1.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/fffw
| [
"tests/test_graph.py::GraphBaseTestCase::test_dest_connect_edge_validation",
"tests/test_graph.py::GraphBaseTestCase::test_node_connect_edge_validation",
"tests/test_graph.py::CopyCodecTestCase::test_split_disconnect_transient"
] | [] | [
"tests/test_graph.py::FilterGraphTestCase::test_any_hardware_filter",
"tests/test_graph.py::FilterGraphTestCase::test_audio_trim_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_codec_metadata_transform",
"tests/test_graph.py::FilterGraphTestCase::test_concat_audio_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_concat_scenes",
"tests/test_graph.py::FilterGraphTestCase::test_concat_video_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_disabled_filters",
"tests/test_graph.py::FilterGraphTestCase::test_ensure_audio",
"tests/test_graph.py::FilterGraphTestCase::test_ensure_video",
"tests/test_graph.py::FilterGraphTestCase::test_filter_graph",
"tests/test_graph.py::FilterGraphTestCase::test_filter_validates_hardware_device",
"tests/test_graph.py::FilterGraphTestCase::test_filter_validates_stream_kind",
"tests/test_graph.py::FilterGraphTestCase::test_overlay_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_scale_changes_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_setpts_metadata",
"tests/test_graph.py::FilterGraphTestCase::test_skip_not_connected_sources",
"tests/test_graph.py::FilterGraphTestCase::test_split_args",
"tests/test_graph.py::FilterGraphTestCase::test_split_enable",
"tests/test_graph.py::FilterGraphTestCase::test_upload_filter_clone",
"tests/test_graph.py::FilterGraphTestCase::test_video_trim_end_of_stream",
"tests/test_graph.py::FilterGraphTestCase::test_video_trim_metadata",
"tests/test_graph.py::CopyCodecTestCase::test_copy_codec_filter_forbidden",
"tests/test_graph.py::CopyCodecTestCase::test_copy_codec_kind_required",
"tests/test_graph.py::CopyCodecTestCase::test_copy_codec_transient_filter_forbidden",
"tests/test_graph.py::CopyCodecTestCase::test_deny_disconnect_from_other_filters",
"tests/test_graph.py::CopyCodecTestCase::test_disconnect_split_without_parent",
"tests/test_graph.py::CopyCodecTestCase::test_end_disconnect_on_source",
"tests/test_graph.py::CopyCodecTestCase::test_split_disconnect_on_copy_codec",
"tests/test_graph.py::CopyCodecTestCase::test_split_disconnect_on_single_output"
] | [] | MIT License | 12,316 | 926 | [
"fffw/encoding/codecs.py",
"fffw/graph/base.py"
] |
|
dask__distributed-5878 | fb8484ece6fd320a5c79d3ec0a07c72913905adb | 2022-02-28 15:50:49 | 2d3fddc14d1e06fd06b9f0f4c3256f254f3c670e | github-actions[bot]: ## Unit Test Results
12 files ±0 12 suites ±0 7h 24m 25s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "duration of all tests") - 3m 58s
2 621 tests +1 2 540 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests") +3 80 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests") ±0 1 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests") - 2
15 650 runs +6 14 784 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests") +5 864 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests") +2 2 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests") - 1
For more details on these failures, see [this check](https://github.com/dask/distributed/runs/5363326917).
Results for commit 3484f0c5. ± Comparison against base commit fb8484ec.
| diff --git a/distributed/nanny.py b/distributed/nanny.py
index 55c7838d..65a2d303 100644
--- a/distributed/nanny.py
+++ b/distributed/nanny.py
@@ -13,7 +13,7 @@ from contextlib import suppress
from inspect import isawaitable
from queue import Empty
from time import sleep as sync_sleep
-from typing import ClassVar
+from typing import TYPE_CHECKING, ClassVar, Literal
import psutil
from tornado import gen
@@ -45,6 +45,9 @@ from .utils import (
)
from .worker import Worker, parse_memory_limit, run
+if TYPE_CHECKING:
+ from .diagnostics.plugin import NannyPlugin
+
logger = logging.getLogger(__name__)
@@ -94,6 +97,7 @@ class Nanny(ServerNode):
services=None,
name=None,
memory_limit="auto",
+ memory_terminate_fraction: float | Literal[False] | None = None,
reconnect=True,
validate=False,
quiet=False,
@@ -203,8 +207,10 @@ class Nanny(ServerNode):
self.worker_kwargs = worker_kwargs
self.contact_address = contact_address
- self.memory_terminate_fraction = dask.config.get(
- "distributed.worker.memory.terminate"
+ self.memory_terminate_fraction = (
+ memory_terminate_fraction
+ if memory_terminate_fraction is not None
+ else dask.config.get("distributed.worker.memory.terminate")
)
self.services = services
@@ -231,7 +237,7 @@ class Nanny(ServerNode):
"plugin_remove": self.plugin_remove,
}
- self.plugins = {}
+ self.plugins: dict[str, NannyPlugin] = {}
super().__init__(
handlers=handlers, io_loop=self.loop, connection_args=self.connection_args
diff --git a/distributed/system_monitor.py b/distributed/system_monitor.py
index 0694facd..1d2a9752 100644
--- a/distributed/system_monitor.py
+++ b/distributed/system_monitor.py
@@ -72,10 +72,18 @@ class SystemMonitor:
except IndexError:
return {k: None for k, v in self.quantities.items()}
+ def get_process_memory(self) -> int:
+ """Sample process memory, as reported by the OS.
+ This one-liner function exists so that it can be easily mocked in unit tests,
+ as the OS allocating and releasing memory is highly volatile and a constant
+ source of flakiness.
+ """
+ return self.proc.memory_info().rss
+
def update(self):
with self.proc.oneshot():
cpu = self.proc.cpu_percent()
- memory = self.proc.memory_info().rss
+ memory = self.get_process_memory()
now = time()
self.cpu.append(cpu)
diff --git a/distributed/worker.py b/distributed/worker.py
index 3195fe38..443b0364 100644
--- a/distributed/worker.py
+++ b/distributed/worker.py
@@ -3701,8 +3701,7 @@ class Worker(ServerNode):
self._memory_monitoring = True
total = 0
- proc = self.monitor.proc
- memory = proc.memory_info().rss
+ memory = self.monitor.get_process_memory()
frac = memory / self.memory_limit
def check_pause(memory):
@@ -3743,7 +3742,6 @@ class Worker(ServerNode):
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
- start = time()
# Implement hysteresis cycle where spilling starts at the spill threshold
# and stops at the target threshold. Normally that here the target threshold
# defines process memory, whereas normally it defines reported managed
@@ -3768,25 +3766,21 @@ class Worker(ServerNode):
break
weight = self.data.evict()
if weight == -1:
- # Failed to evict: disk full, spill size limit exceeded, or pickle error
+ # Failed to evict:
+ # disk full, spill size limit exceeded, or pickle error
break
total += weight
count += 1
- # If the current buffer is filled with a lot of small values,
- # evicting one at a time is very slow and the worker might
- # generate new data faster than it is able to evict. Therefore,
- # only pass on control if we spent at least 0.5s evicting
- if time() - start > 0.5:
- await asyncio.sleep(0)
- start = time()
- memory = proc.memory_info().rss
+ await asyncio.sleep(0)
+
+ memory = self.monitor.get_process_memory()
if total > need and memory > target:
# Issue a GC to ensure that the evicted data is actually
# freed from memory and taken into account by the monitor
# before trying to evict even more data.
self._throttled_gc.collect()
- memory = proc.memory_info().rss
+ memory = self.monitor.get_process_memory()
check_pause(memory)
if count:
| `test_spill_hysteresis` flaky on ubuntu
This test was already marked as flaky in https://github.com/dask/distributed/issues/5840 for other OSes, but I've seen a failure on Ubuntu as well; see https://github.com/fjetter/distributed/runs/5288437230?check_suite_focus=true
(This is a branch where I am testing things around connect timeouts which should not affect the spill buffer. If it fails on this branch, it is reasonable to believe it will fail on others as well)
Test introduced in https://github.com/dask/distributed/pull/5813
cc @crusaderky | dask/distributed | diff --git a/distributed/tests/test_worker.py b/distributed/tests/test_worker.py
index fc024f3b..2cb36819 100644
--- a/distributed/tests/test_worker.py
+++ b/distributed/tests/test_worker.py
@@ -27,6 +27,7 @@ from dask.utils import tmpfile
import distributed
from distributed import (
Client,
+ Event,
Nanny,
Reschedule,
default_client,
@@ -1287,6 +1288,7 @@ async def test_spill_constrained(c, s, w):
nthreads=[("", 1)],
client=True,
worker_kwargs=dict(
+ memory_limit="1000 MB",
memory_monitor_interval="10ms",
memory_target_fraction=False,
memory_spill_fraction=0.7,
@@ -1298,157 +1300,76 @@ async def test_spill_spill_threshold(c, s, a):
Test that the spill threshold uses the process memory and not the managed memory
reported by sizeof(), which may be inaccurate.
"""
- # Reach 'spill' threshold after 400MB of managed data. We need to be generous in
- # order to avoid flakiness due to fluctuations in unmanaged memory.
- # FIXME https://github.com/dask/distributed/issues/5367
- # This works just by luck for the purpose of the spill and pause thresholds,
- # and does NOT work for the target threshold.
- memory = psutil.Process().memory_info().rss
- a.memory_limit = (memory + 300e6) / 0.7
-
- class UnderReport:
- """100 MB process memory, 10 bytes reported managed memory"""
-
- def __init__(self, *args):
- self.data = "x" * int(100e6)
-
- def __sizeof__(self):
- return 10
-
- def __reduce__(self):
- """Speed up test by writing very little to disk when spilling"""
- return UnderReport, ()
-
- futures = c.map(UnderReport, range(8))
-
+ a.monitor.get_process_memory = lambda: 800_000_000 if a.data.fast else 0
+ x = c.submit(inc, 0, key="x")
while not a.data.disk:
await asyncio.sleep(0.01)
-
-
-async def assert_not_everything_is_spilled(w: Worker) -> None:
- start = time()
- while time() < start + 0.5:
- assert w.data
- if not w.data.memory: # type: ignore
- # The hysteresis system fails on Windows and MacOSX because process memory
- # is very slow to shrink down after calls to PyFree. As a result,
- # Worker.memory_monitor will continue spilling until there's nothing left.
- # Nothing we can do about this short of finding either a way to change this
- # behaviour at OS level or a better measure of allocated memory.
- assert not LINUX, "All data was spilled to disk"
- raise pytest.xfail("https://github.com/dask/distributed/issues/5840")
- await asyncio.sleep(0)
+ assert await x == 1
@requires_zict
-@gen_cluster(
- nthreads=[("", 1)],
- client=True,
- worker_kwargs=dict(
- # FIXME https://github.com/dask/distributed/issues/5367
- # Can't reconfigure the absolute target threshold after the worker
- # started, so we're setting it here to something extremely small and then
- # increasing the memory_limit dynamically below in order to test the
- # spill threshold.
- memory_limit=1,
- memory_monitor_interval="10ms",
- memory_target_fraction=False,
- memory_spill_fraction=0.7,
- memory_pause_fraction=False,
- ),
[email protected](
+ "memory_target_fraction,managed,expect_spilled",
+ [
+ # no target -> no hysteresis
+ # Over-report managed memory to test that the automated LRU eviction based on
+ # target is never triggered
+ (False, int(10e9), 1),
+ # Under-report managed memory, so that we reach the spill threshold for process
+ # memory without first reaching the target threshold for managed memory
+ # target == spill -> no hysteresis
+ (0.7, 0, 1),
+ # target < spill -> hysteresis from spill to target
+ (0.4, 0, 7),
+ ],
)
-async def test_spill_no_target_threshold(c, s, a):
- """Test that you can enable the spill threshold while leaving the target threshold
- to False
+@gen_cluster(nthreads=[], client=True)
+async def test_spill_hysteresis(c, s, memory_target_fraction, managed, expect_spilled):
+ """
+ 1. Test that you can enable the spill threshold while leaving the target threshold
+ to False
+ 2. Test the hysteresis system where, once you reach the spill threshold, the worker
+ won't stop spilling until the target threshold is reached
"""
- memory = psutil.Process().memory_info().rss
- a.memory_limit = (memory + 300e6) / 0.7 # 300 MB before we start spilling
-
- class OverReport:
- """Configurable process memory, 10 GB reported managed memory"""
-
- def __init__(self, size):
- self.data = "x" * size
+ class C:
def __sizeof__(self):
- return int(10e9)
-
- def __reduce__(self):
- """Speed up test by writing very little to disk when spilling"""
- return OverReport, (len(self.data),)
-
- f1 = c.submit(OverReport, 0, key="f1")
- await wait(f1)
- assert set(a.data.memory) == {"f1"}
-
- # 800 MB. Use large chunks to stimulate timely release of process memory.
- futures = c.map(OverReport, range(int(100e6), int(100e6) + 8))
-
- while not a.data.disk:
- await asyncio.sleep(0.01)
- assert "f1" in a.data.disk
-
- # Spilling normally starts at the spill threshold and stops at the target threshold.
- # In this special case, it stops as soon as the process memory goes below the spill
- # threshold, e.g. without a hysteresis cycle. Test that we didn't instead dump the
- # whole data to disk (memory_limit * target = 0)
- await assert_not_everything_is_spilled(a)
+ return managed
-
[email protected]
-@requires_zict
-@gen_cluster(
- nthreads=[("", 1)],
- client=True,
- worker_kwargs=dict(
- memory_limit="1 GiB", # See FIXME note in previous test
+ async with Worker(
+ s.address,
+ memory_limit="1000 MB",
memory_monitor_interval="10ms",
- memory_target_fraction=0.4,
+ memory_target_fraction=memory_target_fraction,
memory_spill_fraction=0.7,
memory_pause_fraction=False,
- ),
-)
-async def test_spill_hysteresis(c, s, a):
- memory = psutil.Process().memory_info().rss
- a.memory_limit = (memory + 1e9) / 0.7 # Start spilling after 1 GB
+ ) as a:
+ a.monitor.get_process_memory = lambda: 50_000_000 * len(a.data.fast)
- # Under-report managed memory, so that we reach the spill threshold for process
- # memory without first reaching the target threshold for managed memory
- class UnderReport:
- def __init__(self):
- self.data = "x" * int(100e6) # 100 MB
+ # Add 500MB (reported) process memory. Spilling must not happen.
+ futures = [c.submit(C, pure=False) for _ in range(10)]
+ await wait(futures)
+ await asyncio.sleep(0.1)
+ assert not a.data.disk
- def __sizeof__(self):
- return 1
+ # Add another 250MB unmanaged memory. This must trigger the spilling.
+ futures += [c.submit(C, pure=False) for _ in range(5)]
+ await wait(futures)
- def __reduce__(self):
- """Speed up test by writing very little to disk when spilling"""
- return UnderReport, ()
+ # Wait until spilling starts. Then, wait until it stops.
+ prev_n = 0
+ while not a.data.disk or len(a.data.disk) > prev_n:
+ prev_n = len(a.data.disk)
+ await asyncio.sleep(0)
- max_in_memory = 0
- futures = []
- while not a.data.disk:
- futures.append(c.submit(UnderReport, pure=False))
- max_in_memory = max(max_in_memory, len(a.data.memory))
- await wait(futures)
- await asyncio.sleep(0.05)
- max_in_memory = max(max_in_memory, len(a.data.memory))
-
- # If there were no hysteresis, we would lose exactly 1 key.
- # Note that, for this test to be meaningful, memory must shrink down readily when
- # we deallocate Python objects. This is not always the case on Windows and MacOSX;
- # on Linux we set MALLOC_TRIM to help in that regard.
- # To verify that this test is useful, set target=spill and watch it fail.
- while len(a.data.memory) > max_in_memory - 3:
- await asyncio.sleep(0.01)
- await assert_not_everything_is_spilled(a)
+ assert len(a.data.disk) == expect_spilled
[email protected]
@gen_cluster(
nthreads=[("", 1)],
client=True,
worker_kwargs=dict(
+ memory_limit="1000 MB",
memory_monitor_interval="10ms",
memory_target_fraction=False,
memory_spill_fraction=False,
@@ -1456,35 +1377,59 @@ async def test_spill_hysteresis(c, s, a):
),
)
async def test_pause_executor(c, s, a):
- # See notes in test_spill_spill_threshold
- memory = psutil.Process().memory_info().rss
- a.memory_limit = (memory + 160e6) / 0.8 # Pause after 200 MB
+ mocked_rss = 0
+ a.monitor.get_process_memory = lambda: mocked_rss
- # Note: it's crucial to have a very large single chunk of memory that gets descoped
- # all at once in order to instigate release of process memory.
- # Read: https://github.com/dask/distributed/issues/5840
- def f():
- # Add 400 MB unmanaged memory
- x = "x" * int(400e6)
- w = get_worker()
- while w.status != Status.paused:
- sleep(0.01)
+ # Task that is running when the worker pauses
+ ev_x = Event()
+
+ def f(ev):
+ ev.wait()
+ return 1
+
+ x = c.submit(f, ev_x, key="x")
+ while a.executing_count != 1:
+ await asyncio.sleep(0.01)
with captured_logger(logging.getLogger("distributed.worker")) as logger:
- future = c.submit(f, key="x")
- futures = c.map(slowinc, range(30), delay=0.1)
+ # Task that is queued on the worker when the worker pauses
+ y = c.submit(inc, 1, key="y")
+ while "y" not in a.tasks:
+ await asyncio.sleep(0.01)
- while a.status != Status.paused:
+ # Hog the worker with 900MB unmanaged memory
+ mocked_rss = 900_000_000
+ while s.workers[a.address].status != Status.paused:
await asyncio.sleep(0.01)
assert "Pausing worker" in logger.getvalue()
- assert sum(f.status == "finished" for f in futures) < 4
- while a.status != Status.running:
+ # Task that is queued on the scheduler when the worker pauses.
+ # It is not sent to the worker.
+ z = c.submit(inc, 2, key="z")
+ while "z" not in s.tasks or s.tasks["z"].state != "no-worker":
await asyncio.sleep(0.01)
+ # Test that a task that already started when the worker paused can complete
+ # and its output can be retrieved. Also test that the now free slot won't be
+ # used by other tasks.
+ await ev_x.set()
+ assert await x == 1
+ await asyncio.sleep(0.05)
+
+ assert a.executing_count == 0
+ assert len(a.ready) == 1
+ assert a.tasks["y"].state == "ready"
+ assert "z" not in a.tasks
+
+ # Release the memory. Tasks that were queued on the worker are executed.
+ # Tasks that were stuck on the scheduler are sent to the worker and executed.
+ mocked_rss = 0
+ assert await y == 2
+ assert await z == 3
+
+ assert a.status == Status.running
assert "Resuming worker" in logger.getvalue()
- await wait(futures)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "50 ms"})
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 3
} | 2022.02 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | click==8.1.8
cloudpickle==3.1.1
coverage==7.8.0
dask==2022.2.1
-e git+https://github.com/dask/distributed.git@fb8484ece6fd320a5c79d3ec0a07c72913905adb#egg=distributed
exceptiongroup==1.2.2
fsspec==2025.3.2
iniconfig==2.1.0
Jinja2==3.1.6
locket==1.0.0
MarkupSafe==3.0.2
msgpack==1.1.0
packaging==24.2
partd==1.4.2
pluggy==1.5.0
psutil==7.0.0
pytest==8.3.5
pytest-cov==6.0.0
PyYAML==6.0.2
sortedcontainers==2.4.0
tblib==3.1.0
tomli==2.2.1
toolz==1.0.0
tornado==6.4.2
zict==3.0.0
| name: distributed
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- click==8.1.8
- cloudpickle==3.1.1
- coverage==7.8.0
- dask==2022.2.1
- exceptiongroup==1.2.2
- fsspec==2025.3.2
- iniconfig==2.1.0
- jinja2==3.1.6
- locket==1.0.0
- markupsafe==3.0.2
- msgpack==1.1.0
- packaging==24.2
- partd==1.4.2
- pluggy==1.5.0
- psutil==7.0.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pyyaml==6.0.2
- sortedcontainers==2.4.0
- tblib==3.1.0
- tomli==2.2.1
- toolz==1.0.0
- tornado==6.4.2
- zict==3.0.0
prefix: /opt/conda/envs/distributed
| [
"distributed/tests/test_worker.py::test_spill_spill_threshold",
"distributed/tests/test_worker.py::test_spill_hysteresis[False-10000000000-1]",
"distributed/tests/test_worker.py::test_spill_hysteresis[0.7-0-1]",
"distributed/tests/test_worker.py::test_spill_hysteresis[0.4-0-7]",
"distributed/tests/test_worker.py::test_pause_executor"
] | [
"distributed/tests/test_worker.py::test_fail_write_to_disk_target_1",
"distributed/tests/test_worker.py::test_scheduler_file",
"distributed/tests/test_worker.py::test_bad_local_directory"
] | [
"distributed/tests/test_worker.py::test_worker_nthreads",
"distributed/tests/test_worker.py::test_str",
"distributed/tests/test_worker.py::test_identity",
"distributed/tests/test_worker.py::test_worker_bad_args",
"distributed/tests/test_worker.py::test_upload_file",
"distributed/tests/test_worker.py::test_upload_egg",
"distributed/tests/test_worker.py::test_upload_pyz",
"distributed/tests/test_worker.py::test_broadcast",
"distributed/tests/test_worker.py::test_worker_with_port_zero",
"distributed/tests/test_worker.py::test_worker_port_range",
"distributed/tests/test_worker.py::test_worker_task_data",
"distributed/tests/test_worker.py::test_error_message",
"distributed/tests/test_worker.py::test_chained_error_message",
"distributed/tests/test_worker.py::test_gather",
"distributed/tests/test_worker.py::test_gather_missing_keys",
"distributed/tests/test_worker.py::test_gather_missing_workers",
"distributed/tests/test_worker.py::test_gather_missing_workers_replicated[False]",
"distributed/tests/test_worker.py::test_gather_missing_workers_replicated[True]",
"distributed/tests/test_worker.py::test_io_loop",
"distributed/tests/test_worker.py::test_access_key",
"distributed/tests/test_worker.py::test_run_dask_worker",
"distributed/tests/test_worker.py::test_run_coroutine_dask_worker",
"distributed/tests/test_worker.py::test_Executor",
"distributed/tests/test_worker.py::test_close_on_disconnect",
"distributed/tests/test_worker.py::test_memory_limit_auto",
"distributed/tests/test_worker.py::test_inter_worker_communication",
"distributed/tests/test_worker.py::test_clean",
"distributed/tests/test_worker.py::test_message_breakup",
"distributed/tests/test_worker.py::test_types",
"distributed/tests/test_worker.py::test_system_monitor",
"distributed/tests/test_worker.py::test_restrictions",
"distributed/tests/test_worker.py::test_clean_nbytes",
"distributed/tests/test_worker.py::test_gather_many_small",
"distributed/tests/test_worker.py::test_multiple_transfers",
"distributed/tests/test_worker.py::test_log_exception_on_failed_task",
"distributed/tests/test_worker.py::test_clean_up_dependencies",
"distributed/tests/test_worker.py::test_hold_onto_dependents",
"distributed/tests/test_worker.py::test_stop_doing_unnecessary_work",
"distributed/tests/test_worker.py::test_priorities",
"distributed/tests/test_worker.py::test_heartbeats",
"distributed/tests/test_worker.py::test_worker_dir[Worker]",
"distributed/tests/test_worker.py::test_worker_dir[Nanny]",
"distributed/tests/test_worker.py::test_false_worker_dir",
"distributed/tests/test_worker.py::test_dataframe_attribute_error",
"distributed/tests/test_worker.py::test_fail_write_to_disk_target_2",
"distributed/tests/test_worker.py::test_fail_write_to_disk_spill",
"distributed/tests/test_worker.py::test_pid",
"distributed/tests/test_worker.py::test_get_client",
"distributed/tests/test_worker.py::test_get_client_sync",
"distributed/tests/test_worker.py::test_get_client_coroutine",
"distributed/tests/test_worker.py::test_get_client_coroutine_sync",
"distributed/tests/test_worker.py::test_global_workers",
"distributed/tests/test_worker.py::test_worker_fds",
"distributed/tests/test_worker.py::test_service_hosts_match_worker",
"distributed/tests/test_worker.py::test_start_services",
"distributed/tests/test_worker.py::test_scheduler_delay",
"distributed/tests/test_worker.py::test_statistical_profiling",
"distributed/tests/test_worker.py::test_spill_target_threshold",
"distributed/tests/test_worker.py::test_spill_constrained",
"distributed/tests/test_worker.py::test_statistical_profiling_cycle",
"distributed/tests/test_worker.py::test_get_current_task",
"distributed/tests/test_worker.py::test_reschedule",
"distributed/tests/test_worker.py::test_deque_handler",
"distributed/tests/test_worker.py::test_avoid_memory_monitor_if_zero_limit",
"distributed/tests/test_worker.py::test_dict_data_if_no_spill_to_disk",
"distributed/tests/test_worker.py::test_get_worker_name",
"distributed/tests/test_worker.py::test_parse_memory_limit",
"distributed/tests/test_worker.py::test_scheduler_address_config",
"distributed/tests/test_worker.py::test_prefer_gather_from_local_address",
"distributed/tests/test_worker.py::test_custom_metrics",
"distributed/tests/test_worker.py::test_register_worker_callbacks",
"distributed/tests/test_worker.py::test_register_worker_callbacks_err",
"distributed/tests/test_worker.py::test_data_types",
"distributed/tests/test_worker.py::test_local_directory",
"distributed/tests/test_worker.py::test_local_directory_make_new_directory",
"distributed/tests/test_worker.py::test_host_address",
"distributed/tests/test_worker.py::test_resource_limit",
"distributed/tests/test_worker.py::test_lifetime_stagger",
"distributed/tests/test_worker.py::test_bad_metrics",
"distributed/tests/test_worker.py::test_bad_startup",
"distributed/tests/test_worker.py::test_pip_install",
"distributed/tests/test_worker.py::test_pip_install_fails",
"distributed/tests/test_worker.py::test_update_latency",
"distributed/tests/test_worker.py::test_heartbeat_comm_closed[True]",
"distributed/tests/test_worker.py::test_heartbeat_comm_closed[False]",
"distributed/tests/test_worker.py::test_taskstate_metadata",
"distributed/tests/test_worker.py::test_executor_offload",
"distributed/tests/test_worker.py::test_story",
"distributed/tests/test_worker.py::test_story_with_deps",
"distributed/tests/test_worker.py::test_gather_dep_one_worker_always_busy",
"distributed/tests/test_worker.py::test_worker_client_uses_default_no_close",
"distributed/tests/test_worker.py::test_worker_client_closes_if_created_on_worker_one_worker",
"distributed/tests/test_worker.py::test_worker_client_closes_if_created_on_worker_last_worker_alive",
"distributed/tests/test_worker.py::test_multiple_executors",
"distributed/tests/test_worker.py::test_process_executor",
"distributed/tests/test_worker.py::test_process_executor_kills_process",
"distributed/tests/test_worker.py::test_process_executor_raise_exception",
"distributed/tests/test_worker.py::test_gpu_executor",
"distributed/tests/test_worker.py::test_worker_state_error_release_error_last",
"distributed/tests/test_worker.py::test_worker_state_error_release_error_first",
"distributed/tests/test_worker.py::test_worker_state_error_release_error_int",
"distributed/tests/test_worker.py::test_worker_state_error_long_chain",
"distributed/tests/test_worker.py::test_hold_on_to_replicas",
"distributed/tests/test_worker.py::test_worker_reconnects_mid_compute",
"distributed/tests/test_worker.py::test_worker_reconnects_mid_compute_multiple_states_on_scheduler",
"distributed/tests/test_worker.py::test_forget_dependents_after_release",
"distributed/tests/test_worker.py::test_steal_during_task_deserialization",
"distributed/tests/test_worker.py::test_gather_dep_exception_one_task",
"distributed/tests/test_worker.py::test_gather_dep_exception_one_task_2",
"distributed/tests/test_worker.py::test_acquire_replicas",
"distributed/tests/test_worker.py::test_acquire_replicas_same_channel",
"distributed/tests/test_worker.py::test_acquire_replicas_many",
"distributed/tests/test_worker.py::test_remove_replicas_simple",
"distributed/tests/test_worker.py::test_remove_replicas_while_computing",
"distributed/tests/test_worker.py::test_who_has_consistent_remove_replicas",
"distributed/tests/test_worker.py::test_acquire_replicas_with_no_priority",
"distributed/tests/test_worker.py::test_missing_released_zombie_tasks",
"distributed/tests/test_worker.py::test_missing_released_zombie_tasks_2",
"distributed/tests/test_worker.py::test_task_flight_compute_oserror",
"distributed/tests/test_worker.py::test_gather_dep_cancelled_rescheduled",
"distributed/tests/test_worker.py::test_gather_dep_do_not_handle_response_of_not_requested_tasks",
"distributed/tests/test_worker.py::test_gather_dep_no_longer_in_flight_tasks",
"distributed/tests/test_worker.py::test_deadlock_cancelled_after_inflight_before_gather_from_worker[False-resumed]",
"distributed/tests/test_worker.py::test_deadlock_cancelled_after_inflight_before_gather_from_worker[False-cancelled]",
"distributed/tests/test_worker.py::test_deadlock_cancelled_after_inflight_before_gather_from_worker[True-resumed]",
"distributed/tests/test_worker.py::test_deadlock_cancelled_after_inflight_before_gather_from_worker[True-cancelled]",
"distributed/tests/test_worker.py::test_Worker__to_dict",
"distributed/tests/test_worker.py::test_TaskState__to_dict",
"distributed/tests/test_worker.py::test_unique_task_heap"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,319 | 1,207 | [
"distributed/nanny.py",
"distributed/system_monitor.py",
"distributed/worker.py"
] |
fusion-energy__neutronics_material_maker-20 | 00619e8f53da4bbd01eff10cc7a787f299e0b40e | 2022-03-01 12:06:12 | 00619e8f53da4bbd01eff10cc7a787f299e0b40e | diff --git a/neutronics_material_maker/material.py b/neutronics_material_maker/material.py
index fb4e7ab..141ce9b 100644
--- a/neutronics_material_maker/material.py
+++ b/neutronics_material_maker/material.py
@@ -62,8 +62,8 @@ class Material:
with a unique identifier.
packing_fraction: This value is mutliplied by the density
which allows packing_fraction to be taken into account for materials
- involving an amount of void. Recall that packing_fraction is equal
- to 1/void fraction
+ involving an amount of void. Recall that
+ packing_fraction = 1 - void fraction
enrichment: This is the percentage of isotope enrichment
required for the material. This works for materials that have
an enrichment_target and enrichment_type also specified.
@@ -107,7 +107,7 @@ class Material:
(str) as the key and the amount of that isotope (float) as the value
e.g. {'Li6': 0.9, 'Li7': 0.1} alternatively zaid representation
can also be used instead of the symbol e.g. {'3006': 0.9, '4007': 0.1}
- percent_type: Atom "ao" or or weight fraction "wo"
+ percent_type: Atom "ao" or weight fraction "wo"
density: value to be used as the density. Can be a number or a string.
if a string then it will be evaluated as an equation to find the
density and can contain temperature and pressure variables.
@@ -865,7 +865,7 @@ class Material:
return Material(name=name, **entry)
def from_mixture(
- materials,
+ materials: list,
fracs: List[float],
percent_type: Optional[str] = "vo",
name: Optional[str] = None,
@@ -880,10 +880,65 @@ class Material:
volume_in_cm3: Optional[float] = None,
additional_end_lines: Optional[Dict[str, List[str]]] = None,
):
- if sum(fracs) != 1.0:
+ """Creates a material from a mixture of multiple materials.
+
+ Args:
+ materials: A list of neutronics_material_maker.Materials or openmc.Materials
+ fracs: A list of material fractions, typically sums to 1.
+ percent_type: Volume "vo" Atom "ao" or weight fraction "wo"
+ name: the name of the material
+ packing_fraction: This value is mutliplied by the density
+ which allows packing_fraction to be taken into account for materials
+ involving an amount of void. Recall that
+ packing_fraction = 1 - void fraction
+ temperature: The temperature of the material in degrees
+ Kelvin. Temperature impacts the density of some materials in the
+ collection. Materials in the collection that are impacted by
+ temperature have density equations that depend on temperature.
+ These tend to be liquids and gases used for coolants and even
+ liquids such as lithium-lead and FLiBe that are used as breeder
+ materials. Added to the OpenMC material object and the serpent
+ material card.
+ temperature_to_neutronics_code: The temperature args are often used to
+ find the material density via density equations. However it can be
+ desirable to not make use of this temperature in the neutronics
+ codes. Typically this is due to missing cross section data.
+ Defaults to True which makes use of any material temperature in the
+ neutronics material. Can be set to False which doesn't propagate
+ temperature data to the neutronics material. This only impacts
+ OpenMC and serpent materials. As shift materials require the use of
+ temperature and fispact/mcnp materials don't make use of
+ temperature on the material card.
+ pressure: The pressure of the material in Pascals. Pressure impacts the
+ density of some materials in the collection. Materials in the
+ collection that are impacted by pressure have density equations
+ that depend on pressure. These tend to be liquids and gases used
+ for coolants such as H2O and CO2.
+ zaid_suffix: The nuclear library to apply to the zaid, for
+ example ".31c", this is used in MCNP and Serpent material cards.
+ material_id: the id number or mat number used in the MCNP material card
+ decimal_places: The number of decimal places to use in MCNP and
+ Seprent material cards when they are printed out (default of 8).
+ volume_in_cm3: The volume of the material in cm3, used when
+ creating fispact material cards
+ comment: An entry used to store information on the source of the
+ material data
+ additional_end_lines: Additional lines of test that are added to the end of
+ the material card. Compatable with MCNP, Serpent, Fispact outputs
+ which are string based. Argument should be a dictionary specifying
+ the code and a list of lines to be added, be sure to include any
+ white required spaces in the string. This example will add a single
+ S(a,b) card to an MCNP card {'mcnp': [' mt24 lwtr.01']}.
+
+ Returns: neutronics_material_maker.Material() object
+ """
+
+ if sum(fracs) * packing_fraction + (1 - packing_fraction) != 1.0:
msg = (
- "warning sum of MutliMaterials.fracs do not sum to 1."
- f"{fracs} = {sum(fracs)}"
+ "Warning some material is not accounted for as the follow "
+ "equation is not equal to 1."
+ "sum(fracs)*packing_fraction + (1-packing_fraction)"
+ f"sum({fracs})*{packing_fraction} + (1-{packing_fraction})"
)
warnings.warn(msg, UserWarning)
| packing fraction usage
> This value is mutliplied by the density which allows packing_fraction to be taken into account for materials involving an amount of void. Recall that packing_fraction is equal to 1/void fraction
The documentation gives this statement for the packing_fraction parameter but it does not agree with the usage/output.
If I have 10 % void in a material, that definition gives 1/10 = 0.1, and the material density is multiplied by this, making it an order of magnitude lower rather than 90 % of the density, which is the intention. Using a number greater than 1 gives an error.
Should the correct definition not be packing_fraction = 1 - void fraction?
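A quick numeric sketch of the two readings, using an arbitrary reference density purely for the arithmetic (not a value taken from the package):
```python
rho = 19.3  # illustrative reference density in g/cm3

# packing_fraction = 1 - void fraction: 10 % void keeps 90 % of the density
print(rho * 0.9)  # 17.37

# reading the current docstring literally gives 1/10 = 0.1: an order of magnitude lower
print(rho * 0.1)  # 1.93
```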
Also, when using material.from_mixture, if the fractions of the materials do not add up to 1 there is a warning, but not if packing_fraction is also used; in that case the resulting density is adjusted for both the remaining fraction and the packing fraction. I believe the warning should still be displayed.
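A sketch of that second point, mirroring the regression test in the accompanying test patch (assuming the package is importable as `nmm`): the fracs sum to 0.9 while packing_fraction is 0.2, so part of the mixture is unaccounted for and a warning is expected.
```python
import neutronics_material_maker as nmm

# fracs sum to 0.9 and packing_fraction is 0.2, so some of the mixture is
# unaccounted for; a UserWarning is expected rather than silent acceptance.
mixed = nmm.Material.from_mixture(
    name="test_material",
    materials=[
        nmm.Material.from_library("tungsten", packing_fraction=0.6),
        nmm.Material.from_library("eurofer", packing_fraction=0.8),
    ],
    fracs=[0.3, 0.6],
    packing_fraction=0.2,
)
```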
| fusion-energy/neutronics_material_maker | diff --git a/tests/test_Material_from_mixture.py b/tests/test_Material_from_mixture.py
index e071fcc..45fd693 100644
--- a/tests/test_Material_from_mixture.py
+++ b/tests/test_Material_from_mixture.py
@@ -495,7 +495,7 @@ class test_object_properties(unittest.TestCase):
assert issubclass(w[-1].category, UserWarning)
# the second entry is needed as OpenMC material mixer also raises
# and error
- assert "warning sum of MutliMaterials.fracs do not sum to 1." in str(
+ assert "Warning some material is not accounted for as the follow" in str(
w[-2].message
)
@@ -521,13 +521,13 @@ class test_object_properties(unittest.TestCase):
assert issubclass(w[-1].category, UserWarning)
# the second entry is needed as OpenMC material mixer also raises
# and error
- assert "warning sum of MutliMaterials.fracs do not sum to 1." in str(
+ assert "Warning some material is not accounted for as the follow" in str(
w[-2].message
)
- def test_incorrect_packing_fraction():
- """checks a ValueError is raised when the packing_fraction is the
- wrong type"""
+ def not_fully_accounting_mat():
+ """checks a warning is raised when the fracs and packing fractions
+ don't account for all the material"""
nmm.Material.from_mixture(
name="test_material",
@@ -535,11 +535,23 @@ class test_object_properties(unittest.TestCase):
nmm.Material.from_library("tungsten", packing_fraction=0.6),
nmm.Material.from_library("eurofer", packing_fraction=0.8),
],
- fracs=[0.3, 0.7],
- packing_fraction="1",
+ fracs=[0.3, 0.6],
+ packing_fraction=0.2,
)
- self.assertRaises(ValueError, test_incorrect_packing_fraction)
+ with warnings.catch_warnings(record=True) as w:
+ # Cause all warnings to always be triggered.
+ warnings.simplefilter("always")
+ # Trigger a warning.
+ not_fully_accounting_mat()
+ # Verify some things
+ assert len(w) >= 1
+ assert issubclass(w[-1].category, UserWarning)
+ # the second entry is needed as OpenMC material mixer also raises
+ # and error
+ assert "Warning some material is not accounted for as the follow" in str(
+ w[-2].message
+ )
def test_too_large_packing_fraction():
"""checks a ValueError is raised when the packing_fraction is the
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "pip install -e .[density]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y git"
],
"python": "3.8",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.5
asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733175639022/work
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1666788425425/work
cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1723018376978/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1695554215751/work
CoolProp==6.7.0
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1696677705766/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
exceptiongroup==1.2.2
executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1725214404607/work
fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1720359039462/work
future @ file:///home/conda/feedstock_root/build_artifacts/future_1708610096684/work
h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1634280454336/work
h5py @ file:///home/conda/feedstock_root/build_artifacts/h5py_1717664826778/work
hpack==4.0.0
hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1619110129307/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work
importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1725921340658/work
iniconfig==2.1.0
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1683289033986/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1695379923772/work
lxml @ file:///home/conda/feedstock_root/build_artifacts/lxml_1723458263021/work
matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1695076686224/work
matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work
munkres==1.1.4
-e git+https://github.com/fusion-energy/neutronics_material_maker.git@00619e8f53da4bbd01eff10cc7a787f299e0b40e#egg=neutronics_material_maker
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1687808301083/work
openmc @ file:///home/conda/feedstock_root/build_artifacts/openmc_1719236595914/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1688740514018/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1719903565503/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1726613481435/work
pluggy==1.5.0
pooch @ file:///home/conda/feedstock_root/build_artifacts/pooch_1717777836653/work
prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1721585709575/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1711811537435/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1714846767233/work
pymoab @ file:///home/conda/feedstock_root/build_artifacts/moab_1717683234448/work/pymoab
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1661604839144/work
pytest==8.3.5
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1717057054362/work
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy-split_1683900206454/work/base/dist/scipy-1.10.1-cp38-cp38-linux_x86_64.whl#sha256=13ab5c17dedeb97a65ba90de1c51475f2d3ae0fe50bc43e9b018cc2943541de4
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
stack-data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1669632077133/work
tomli==2.2.1
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1713535121073/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1717802530399/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1727140567071/work
uncertainties @ file:///home/conda/feedstock_root/build_artifacts/uncertainties_1720452225073/work
unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1695847997538/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1726496430923/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1704731205417/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1731262100163/work
zstandard @ file:///home/conda/feedstock_root/build_artifacts/zstandard_1667296101734/work
| name: neutronics_material_maker
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- asttokens=3.0.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- blosc=1.21.6=he440d0b_1
- brotli=1.1.0=hb9d3cd8_2
- brotli-bin=1.1.0=hb9d3cd8_2
- brotli-python=1.0.9=py38hfa26641_8
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- certifi=2024.8.30=pyhd8ed1ab_0
- cffi=1.17.0=py38heb5c249_0
- charset-normalizer=3.4.0=pyhd8ed1ab_0
- contourpy=1.1.1=py38h7f3f72f_1
- cycler=0.12.1=pyhd8ed1ab_0
- dagmc=3.2.3=nompi_py310h30e612f_102
- decorator=5.1.1=pyhd8ed1ab_0
- eigen=3.4.0=h00ab1b0_0
- executing=2.1.0=pyhd8ed1ab_0
- fonttools=4.53.1=py38h2019614_0
- freetype=2.13.3=h48d6fc4_0
- future=1.0.0=pyhd8ed1ab_0
- h2=4.1.0=pyhd8ed1ab_0
- h5py=3.11.0=nompi_py38h55b5aab_102
- hdf4=4.2.15=h2a13503_7
- hdf5=1.14.3=nompi_h2d575fe_109
- hpack=4.0.0=pyh9f0ad1d_0
- hyperframe=6.0.1=pyhd8ed1ab_0
- idna=3.10=pyhd8ed1ab_0
- importlib-resources=6.4.5=pyhd8ed1ab_0
- importlib_resources=6.4.5=pyhd8ed1ab_0
- ipython=8.12.2=pyh41d4057_0
- jedi=0.19.1=pyhd8ed1ab_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.5=py38h7f3f72f_1
- krb5=1.21.3=h659f571_0
- lcms2=2.17=h717163a_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- libaec=1.1.3=h59595ed_0
- libblas=3.9.0=20_linux64_openblas
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcblas=3.9.0=20_linux64_openblas
- libcurl=8.12.1=h332b0f4_0
- libdeflate=1.23=h4ddbbb0_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libffi=3.4.6=h2dba641_1
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libgomp=14.2.0=h767d61c_2
- libiconv=1.18=h4ce23a2_1
- libjpeg-turbo=3.0.0=hd590300_1
- liblapack=3.9.0=20_linux64_openblas
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnetcdf=4.9.2=nompi_h00e09a9_116
- libnghttp2=1.64.0=h161d5f1_0
- libnsl=2.0.1=hd590300_0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libpng=1.6.47=h943b412_0
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.1=hf672d98_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libtiff=4.7.0=hd9ff511_3
- libuuid=2.38.1=h0b41bf4_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.17.0=h8a09558_0
- libxcrypt=4.4.36=hd590300_1
- libxml2=2.13.7=h0d44e9d_0
- libxslt=1.1.39=h76b75d6_0
- libzip=1.11.2=h6991a6a_0
- libzlib=1.3.1=hb9d3cd8_2
- lxml=5.3.0=py38hd8ce619_0
- lz4-c=1.10.0=h5888daf_1
- matplotlib-base=3.7.3=py38h58ed7fa_0
- matplotlib-inline=0.1.7=pyhd8ed1ab_0
- metis=5.1.0=hd0bcaf9_1007
- moab=5.5.1=nompi_tempest_py38h6786b19_2
- munkres=1.1.4=pyh9f0ad1d_0
- ncurses=6.5=h2d0b736_3
- njoy2016=2016.76=py38h30a670e_0
- numpy=1.24.4=py38h59b608b_0
- openjpeg=2.5.3=h5fbd93e_0
- openmc=0.14.0=dagmc_nompi_py38h91a61c9_102
- openssl=3.4.1=h7b32b05_0
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.0.3=py38h01efb38_1
- parso=0.8.4=pyhd8ed1ab_0
- pexpect=4.9.0=pyhd8ed1ab_0
- pickleshare=0.7.5=py_1003
- pillow=10.4.0=py38h2bc05a7_0
- pip=24.3.1=pyh8b19718_0
- platformdirs=4.3.6=pyhd8ed1ab_0
- pooch=1.8.2=pyhd8ed1ab_0
- prompt-toolkit=3.0.48=pyha770c72_0
- prompt_toolkit=3.0.48=hd8ed1ab_1
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- pure_eval=0.2.3=pyhd8ed1ab_0
- pycparser=2.22=pyhd8ed1ab_0
- pygments=2.18.0=pyhd8ed1ab_0
- pyparsing=3.1.4=pyhd8ed1ab_0
- pysocks=1.7.1=pyha2e5f31_6
- python=3.8.20=h4a871b0_2_cpython
- python-dateutil=2.9.0=pyhd8ed1ab_0
- python-tzdata=2024.2=pyhd8ed1ab_0
- python_abi=3.8=5_cp38
- pytz=2024.2=pyhd8ed1ab_0
- readline=8.2=h8c095d6_2
- requests=2.32.3=pyhd8ed1ab_0
- scipy=1.10.1=py38h59b608b_3
- setuptools=75.3.0=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.2.1=h8bd8927_1
- stack_data=0.6.2=pyhd8ed1ab_0
- tempest-remap=2.2.0=heeae502_5
- tk=8.6.13=noxft_h4845f30_101
- traitlets=5.14.3=pyhd8ed1ab_0
- typing_extensions=4.12.2=pyha770c72_0
- uncertainties=3.2.2=pyhd8ed1ab_1
- unicodedata2=15.1.0=py38h01eb140_0
- urllib3=2.2.3=pyhd8ed1ab_0
- wcwidth=0.2.13=pyhd8ed1ab_0
- wheel=0.45.1=pyhd8ed1ab_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- zipp=3.21.0=pyhd8ed1ab_0
- zlib=1.3.1=hb9d3cd8_2
- zstandard=0.19.0=py38h0a891b7_0
- zstd=1.5.7=hb8e6e7a_2
- pip:
- asteval==1.0.5
- coolprop==6.7.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- neutronics-material-maker==1.0.4.dev23+g00619e8
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/neutronics_material_maker
| [
"tests/test_Material_from_mixture.py::test_object_properties::test_incorrect_settings"
] | [
"tests/test_Material_from_mixture.py::test_object_properties::test_fispact_from_mixture_type",
"tests/test_Material_from_mixture.py::test_object_properties::test_shift_from_mixture_type"
] | [
"tests/test_Material_from_mixture.py::test_object_properties::test_density_of_mixed_materials_from_density",
"tests/test_Material_from_mixture.py::test_object_properties::test_density_of_mixed_one_packed_crystal_and_one_non_crystal",
"tests/test_Material_from_mixture.py::test_object_properties::test_density_of_mixed_two_packed_and_non_packed_crystals",
"tests/test_Material_from_mixture.py::test_object_properties::test_density_of_mixed_two_packed_crystals",
"tests/test_Material_from_mixture.py::test_object_properties::test_from_mixture_attributes_from_material_objects_and_openmc_materials",
"tests/test_Material_from_mixture.py::test_object_properties::test_from_mixture_vs_mix_materials",
"tests/test_Material_from_mixture.py::test_object_properties::test_json_dump_contains_correct_keys",
"tests/test_Material_from_mixture.py::test_object_properties::test_json_dump_contains_correct_values",
"tests/test_Material_from_mixture.py::test_object_properties::test_json_dump_works",
"tests/test_Material_from_mixture.py::test_object_properties::test_make_from_mixture_from_material_objects",
"tests/test_Material_from_mixture.py::test_object_properties::test_make_from_mixture_from_openmc_materials",
"tests/test_Material_from_mixture.py::test_object_properties::test_mcnp_from_mixture_type",
"tests/test_Material_from_mixture.py::test_object_properties::test_mutliname_setting",
"tests/test_Material_from_mixture.py::test_object_properties::test_packing_fraction_for_from_mixture_function",
"tests/test_Material_from_mixture.py::test_object_properties::test_packing_fraction_for_mix_materials_function",
"tests/test_Material_from_mixture.py::test_object_properties::test_packing_fraction_for_single_materials",
"tests/test_Material_from_mixture.py::test_object_properties::test_packing_fraction_of_a_from_mixture",
"tests/test_Material_from_mixture.py::test_object_properties::test_serpent_from_mixture_type",
"tests/test_Material_from_mixture.py::test_object_properties::test_temperature_from_C_in_from_mixtures",
"tests/test_Material_from_mixture.py::test_object_properties::test_temperature_from_K_in_from_mixtures"
] | [] | MIT License | 12,322 | 1,363 | [
"neutronics_material_maker/material.py"
] |
|
gammapy__gammapy-3837 | e9464236ac26fcec555425d947b18daceffd567a | 2022-03-02 16:45:48 | a55c6d964613081ab364ba994c899605a0a73c2c | codecov[bot]: # [Codecov](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy) Report
> Merging [#3837](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy) (f0fc779) into [master](https://codecov.io/gh/gammapy/gammapy/commit/dd35f7a27c2f939fa6af3080d9c0ec0d1029260e?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy) (dd35f7a) will **increase** coverage by `0.00%`.
> The diff coverage is `100.00%`.
```diff
@@ Coverage Diff @@
## master #3837 +/- ##
=======================================
Coverage 93.82% 93.83%
=======================================
Files 162 162
Lines 19834 19843 +9
=======================================
+ Hits 18610 18619 +9
Misses 1224 1224
```
| [Impacted Files](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy) | Coverage Δ | |
|---|---|---|
| [gammapy/irf/core.py](https://codecov.io/gh/gammapy/gammapy/pull/3837/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy#diff-Z2FtbWFweS9pcmYvY29yZS5weQ==) | `89.38% <ø> (ø)` | |
| [gammapy/utils/interpolation.py](https://codecov.io/gh/gammapy/gammapy/pull/3837/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy#diff-Z2FtbWFweS91dGlscy9pbnRlcnBvbGF0aW9uLnB5) | `94.94% <100.00%> (+0.50%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy). Last update [dd35f7a...f0fc779](https://codecov.io/gh/gammapy/gammapy/pull/3837?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=gammapy).
maxnoe: > Should we raise an error when scalar data is interpolated with method="linear" or just silently use "nearest"?
I'd say raise an error. Falling back to nearest is probably not the right choice every time, so it shouldn't be done silently. | diff --git a/gammapy/data/event_list.py b/gammapy/data/event_list.py
index 385a378ea..33c01a0ef 100644
--- a/gammapy/data/event_list.py
+++ b/gammapy/data/event_list.py
@@ -769,6 +769,34 @@ class EventList:
mask &= offset < offset_band[1]
return self.select_row_subset(mask)
+ def select_rad_max(self, rad_max, position=None):
+ """Select energy dependent offset
+
+ Parameters
+ ----------
+ rad_max : `~gamapy.irf.RadMax2D`
+ Rad max definition
+ position : `~astropy.coordinates.SkyCoord`
+ Center position. By default the pointing position is used.
+
+ Returns
+ -------
+ event_list : `EventList`
+ Copy of event list with selection applied.
+ """
+ if position is None:
+ position = self.pointing_radec
+
+ offset = position.separation(self.pointing_radec)
+ separation = position.separation(self.radec)
+
+ rad_max_for_events = rad_max.evaluate(
+ method="nearest", energy=self.energy, offset=offset
+ )
+
+ selected = separation <= rad_max_for_events
+ return self.select_row_subset(selected)
+
@property
def is_pointed_observation(self):
"""Whether observation is pointed"""
diff --git a/gammapy/irf/core.py b/gammapy/irf/core.py
index 5af7d6db2..0ec3a92ad 100644
--- a/gammapy/irf/core.py
+++ b/gammapy/irf/core.py
@@ -258,6 +258,7 @@ class IRF(metaclass=abc.ABCMeta):
"""
# TODO: change to coord dict?
non_valid_axis = set(kwargs).difference(self.axes.names)
+
if non_valid_axis:
raise ValueError(
f"Not a valid coordinate axis {non_valid_axis}"
@@ -270,6 +271,7 @@ class IRF(metaclass=abc.ABCMeta):
coord = kwargs.get(key, value)
if coord is not None:
coords_default[key] = u.Quantity(coord, copy=False)
+
data = self._interpolate(coords_default.values(), method=method)
if self.interp_kwargs["fill_value"] is not None:
diff --git a/gammapy/irf/rad_max.py b/gammapy/irf/rad_max.py
index cbe1a7344..32943ff87 100644
--- a/gammapy/irf/rad_max.py
+++ b/gammapy/irf/rad_max.py
@@ -32,11 +32,9 @@ class RadMax2D(IRF):
required_axes = ["energy", "offset"]
default_unit = u.deg
-
@classmethod
def from_irf(cls, irf):
- '''
- Create a RadMax2D instance from another IRF component.
+ """Create a RadMax2D instance from another IRF component.
This reads the RAD_MAX metadata keyword from the irf and creates
a RadMax2D with a single bin in energy and offset using the
@@ -57,21 +55,25 @@ class RadMax2D(IRF):
-----
This assumes the true energy axis limits are also valid for the
reco energy limits.
- '''
+ """
if not irf.is_pointlike:
- raise ValueError('RadMax2D.from_irf is only valid for point-like irfs')
+ raise ValueError("RadMax2D.from_irf requires a point-like irf")
- if 'RAD_MAX' not in irf.meta:
- raise ValueError('irf does not contain RAD_MAX keyword')
+ if "RAD_MAX" not in irf.meta:
+ raise ValueError("Irf does not contain RAD_MAX keyword")
rad_max_value = irf.meta["RAD_MAX"]
if not isinstance(rad_max_value, float):
- raise ValueError('RAD_MAX must be a float')
+ raise ValueError(
+ f"RAD_MAX must be a float, got '{type(rad_max_value)}' instead"
+ )
energy_axis = irf.axes["energy_true"].copy(name="energy").squash()
offset_axis = irf.axes["offset"].squash()
return cls(
- data=u.Quantity([[rad_max_value]], u.deg),
+ data=rad_max_value,
axes=[energy_axis, offset_axis],
+ unit="deg",
+ interp_kwargs={"method": "nearest", "fill_value": None},
)
diff --git a/gammapy/makers/utils.py b/gammapy/makers/utils.py
index 3c2d8ed1c..975738506 100644
--- a/gammapy/makers/utils.py
+++ b/gammapy/makers/utils.py
@@ -428,34 +428,6 @@ def make_theta_squared_table(
return table
-def get_rad_max_vs_energy(rad_max, pointing, geom):
- """Obtain the values of `RAD_MAX` at a given offset and for an array of
- estimated energy values (in the geom energy axis).
-
- Parameters
- ----------
- rad_max : `~gammapy.irf.RadMax2D`
- the RAD_MAX_2D table IRF
- geom : `~gammapy.maps.Geom`
- the map geom to be used
- pointing : `~astropy.coordinates.SkyCoord`
- pointing direction
-
- Returns
- -------
- array : `~astropy.units.Quantity`
- Values of the `RAD_MAX` corresponding to each estimated energy bin center.
- """
- on_center = geom.center_skydir
- offset = on_center.separation(pointing)
-
- rad_max_vals = rad_max.evaluate(
- offset=offset, energy=geom.axes["energy"].center
- )
-
- return rad_max_vals
-
-
def make_counts_rad_max(geom, rad_max, events):
"""Extract the counts using for the ON region size the values in the
`RAD_MAX_2D` table.
@@ -474,31 +446,15 @@ def make_counts_rad_max(geom, rad_max, events):
counts : `~gammapy.maps.RegionNDMap`
Counts vs estimated energy extracted from the ON region.
"""
- selected_events = apply_rad_max(events, rad_max, geom.region.center)
+ selected_events = events.select_rad_max(
+ rad_max=rad_max, position=geom.region.center
+ )
- counts_geom = RegionGeom(geom.region, axes=[geom.axes['energy']])
- counts = Map.from_geom(counts_geom)
+ counts = Map.from_geom(geom=geom)
counts.fill_events(selected_events)
-
return counts
-def apply_rad_max(events, rad_max, position):
- '''Apply the RAD_MAX cut to the event list for given region'''
- offset = position.separation(events.pointing_radec)
- separation = position.separation(events.radec)
-
- if rad_max.data.shape == (1, 1):
- rad_max_for_events = rad_max.quantity[0, 0]
- else:
- rad_max_for_events = rad_max.evaluate(
- method="nearest", energy=events.energy, offset=offset
- )
-
- selected = separation <= rad_max_for_events
- return events.select_row_subset(selected)
-
-
def are_regions_overlapping_rad_max(regions, rad_max, offset, e_min, e_max):
"""
Calculate pair-wise separations between all regions and compare with rad_max
@@ -509,20 +465,15 @@ def are_regions_overlapping_rad_max(regions, rad_max, offset, e_min, e_max):
for a, b in combinations(regions, 2)
])
-
- # evaluate fails with a single bin somewhere trying to interpolate
- if rad_max.data.shape == (1, 1):
- rad_max_at_offset = rad_max.quantity[0, 0]
- else:
- rad_max_at_offset = rad_max.evaluate(offset=offset)
- # do not check bins outside of energy range
- edges_min = rad_max.axes['energy'].edges_min
- edges_max = rad_max.axes['energy'].edges_max
- # to be sure all possible values are included, we check
- # for the *upper* energy bin to be larger than e_min and the *lower* edge
- # to be larger than e_max
- mask = (edges_max >= e_min) & (edges_min <= e_max)
- rad_max_at_offset = rad_max_at_offset[mask]
+ rad_max_at_offset = rad_max.evaluate(offset=offset)
+ # do not check bins outside of energy range
+ edges_min = rad_max.axes['energy'].edges_min
+ edges_max = rad_max.axes['energy'].edges_max
+ # to be sure all possible values are included, we check
+ # for the *upper* energy bin to be larger than e_min and the *lower* edge
+ # to be larger than e_max
+ mask = (edges_max >= e_min) & (edges_min <= e_max)
+ rad_max_at_offset = rad_max_at_offset[mask]
return np.any(separations[np.newaxis, :] < (2 * rad_max_at_offset))
@@ -564,7 +515,7 @@ def make_counts_off_rad_max(
)
if len(off_regions) == 0:
- log.warn("RegionsFinder returned no regions")
+ log.warning("RegionsFinder returned no regions")
# counts_off=None, acceptance_off=0
return None, RegionNDMap.from_geom(on_geom, data=0)
@@ -592,7 +543,9 @@ def make_counts_off_rad_max(
)
for off_region in off_regions:
- selected_events = apply_rad_max(events, rad_max, off_region.center)
+ selected_events = events.select_rad_max(
+ rad_max=rad_max, position=off_region.center
+ )
counts_off.fill_events(selected_events)
return counts_off, acceptance_off
diff --git a/gammapy/utils/interpolation.py b/gammapy/utils/interpolation.py
index 0475d139e..9c0450e88 100644
--- a/gammapy/utils/interpolation.py
+++ b/gammapy/utils/interpolation.py
@@ -3,6 +3,7 @@
import numpy as np
import scipy.interpolate
from astropy import units as u
+from itertools import compress
__all__ = [
"interpolate_profile",
@@ -54,22 +55,29 @@ class ScaledRegularGridInterpolator:
self.scale_points = [interpolation_scale(scale) for scale in points_scale]
self.scale = interpolation_scale(values_scale)
- self._include_dim = [len(p) > 1 for p in points]
-
- points_scaled = tuple(
- [
- scale(p)
- for p, scale, _ in zip(points, self.scale_points, self._include_dim)
- if _
- ]
- )
- values_scaled = self.scale(values).squeeze()
self.axis = axis
+ self._include_dimensions = [len(p) > 1 for p in points]
+
+ values_scaled = self.scale(values)
+ points_scaled = self._scale_points(points=points)
+
if extrapolate:
kwargs.setdefault("bounds_error", False)
kwargs.setdefault("fill_value", None)
+ method = kwargs.get("method", None)
+
+ if not np.any(self._include_dimensions):
+ if method != "nearest":
+ raise ValueError(
+ "Interpolating scalar values requires using "
+ "method='nearest' explicitely."
+ )
+
+ if np.any(self._include_dimensions):
+ values_scaled = np.squeeze(values_scaled)
+
if axis is None:
self._interpolate = scipy.interpolate.RegularGridInterpolator(
points=points_scaled, values=values_scaled, **kwargs
@@ -79,6 +87,14 @@ class ScaledRegularGridInterpolator:
points_scaled[0], values_scaled, axis=axis
)
+ def _scale_points(self, points):
+ points_scaled = [scale(p) for p, scale in zip(points, self.scale_points)]
+
+ if np.any(self._include_dimensions):
+ points_scaled = compress(points_scaled, self._include_dimensions)
+
+ return tuple(points_scaled)
+
def __call__(self, points, method="linear", clip=True, **kwargs):
"""Interpolate data points.
@@ -92,14 +108,7 @@ class ScaledRegularGridInterpolator:
clip : bool
Clip values at zero after interpolation.
"""
-
- points = tuple(
- [
- scale(p)
- for scale, p, _ in zip(self.scale_points, points, self._include_dim)
- if _
- ]
- )
+ points = self._scale_points(points=points)
if self.axis is None:
points = np.broadcast_arrays(*points)
| IRF.evaluate fails for IRFs with single energy bin
**Gammapy version**
Current master
**Bug description**
`IRF.evaluate` fails in the interpolation code when there is only a single energy bin, as is the case, for example, when the global RAD_MAX is used.
**Expected behavior**
The single global value should be returned.
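For illustration, here is a minimal sketch of that expected behaviour. It mirrors the regression test added with this fix; the `interp_kwargs={"method": "nearest", "fill_value": None}` argument is taken from that fix and is not part of the original reproduction below.

```python
# Hedged sketch of the expected single-bin behaviour (mirrors the regression test
# added for this fix). The interp_kwargs argument is an assumption taken from the
# fix, not from the original report.
import astropy.units as u
from gammapy.irf import RadMax2D
from gammapy.maps import MapAxis

energy_axis = MapAxis.from_energy_bounds(0.01, 100, 1, unit="TeV")
offset_axis = MapAxis.from_bounds(0.0, 5, 1, unit="deg", name="offset")

rad_max = RadMax2D(
    data=[[0.1]] * u.deg,
    axes=[energy_axis, offset_axis],
    interp_kwargs={"method": "nearest", "fill_value": None},
)

# The single global value comes back for any energy/offset inside the bin.
value = rad_max.evaluate(energy=1 * u.TeV, offset=1 * u.deg)
assert u.isclose(value, 0.1 * u.deg)
```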
**To Reproduce**
```
In [1]: from gammapy.irf import RadMax2D
In [2]: from gammapy.maps import MapAxis
In [3]: energy_axis = MapAxis.from_energy_bounds(0.01, 100, 1, unit="TeV")
In [4]: offset_axis = MapAxis.from_bounds(0., 5, 1, unit="deg", name="offset", )
In [5]: import astropy.units as u
In [6]: rad_max = RadMax2D(data=[[0.1]] * u.deg, axes=[energy_axis, offset_axis])
In [7]: rad_max.evaluate(1 * u.TeV)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [7], in <module>
----> 1 rad_max.evaluate(1 * u.TeV)
File ~/Projects/gammapy/gammapy/irf/core.py:239, in IRF.evaluate(self, method, **kwargs)
237 if coord is not None:
238 coords_default[key] = u.Quantity(coord, copy=False)
--> 239 data = self._interpolate(coords_default.values(), method=method)
241 if self.interp_kwargs["fill_value"] is not None:
242 idxs = self.axes.coord_to_idx(coords_default, clip=False)
File ~/Projects/gammapy/gammapy/utils/interpolation.py:106, in ScaledRegularGridInterpolator.__call__(self, points, method, clip, **kwargs)
104 if self.axis is None:
105 points = np.broadcast_arrays(*points)
--> 106 points_interp = np.stack([_.flat for _ in points]).T
107 values = self._interpolate(points_interp, method, **kwargs)
108 values = self.scale.inverse(values.reshape(points[0].shape))
File <__array_function__ internals>:180, in stack(*args, **kwargs)
File ~/.local/anaconda/envs/gammapy-dev/lib/python3.8/site-packages/numpy/core/shape_base.py:422, in stack(arrays, axis, out)
420 arrays = [asanyarray(arr) for arr in arrays]
421 if not arrays:
--> 422 raise ValueError('need at least one array to stack')
424 shapes = {arr.shape for arr in arrays}
425 if len(shapes) != 1:
ValueError: need at least one array to stack
``` | gammapy/gammapy | diff --git a/gammapy/irf/tests/test_rad_max.py b/gammapy/irf/tests/test_rad_max.py
index 0fdec6cd0..345d5ceee 100644
--- a/gammapy/irf/tests/test_rad_max.py
+++ b/gammapy/irf/tests/test_rad_max.py
@@ -1,12 +1,13 @@
import numpy as np
+from numpy.testing import assert_allclose
import astropy.units as u
from gammapy.maps import MapAxis
+from gammapy.irf import RadMax2D, EffectiveAreaTable2D
+
import pytest
def test_rad_max_roundtrip(tmp_path):
- from gammapy.irf import RadMax2D
-
n_energy = 10
energy_axis = MapAxis.from_energy_bounds(
50 * u.GeV, 100 * u.TeV, n_energy, name="energy"
@@ -30,13 +31,11 @@ def test_rad_max_roundtrip(tmp_path):
rad_max_2d.write(tmp_path / "rad_max.fits")
rad_max_read = RadMax2D.read(tmp_path / "rad_max.fits")
- assert np.all(rad_max_read.data.data == rad_max)
- assert np.all(rad_max_read.data.data == rad_max_read.data.data)
+ assert np.all(rad_max_read.data == rad_max)
+ assert np.all(rad_max_read.data == rad_max_read.data)
def test_rad_max_from_irf():
- from gammapy.irf import RadMax2D, EffectiveAreaTable2D
-
e_bins = 3
o_bins = 2
energy_axis = MapAxis.from_energy_bounds(1 * u.TeV, 10 * u.TeV, nbin=e_bins, name='energy_true')
@@ -71,3 +70,22 @@ def test_rad_max_from_irf():
assert rad_max.axes['offset'].edges[1] == aeff.axes['offset'].edges[-1]
assert rad_max.quantity.shape == (1, 1)
assert rad_max.quantity[0, 0] == 0.2 * u.deg
+
+
+def test_rad_max_single_bin():
+ energy_axis = MapAxis.from_energy_bounds(0.01, 100, 1, unit="TeV")
+ offset_axis = MapAxis.from_bounds(0., 5, 1, unit="deg", name="offset", )
+
+ rad_max = RadMax2D(
+ data=[[0.1]] * u.deg,
+ axes=[energy_axis, offset_axis],
+ interp_kwargs={"method": "nearest", "fill_value": None}
+ )
+
+ value = rad_max.evaluate(energy=1 * u.TeV, offset=1 * u.deg)
+ assert_allclose(value, 0.1 * u.deg)
+
+ value = rad_max.evaluate(energy=[1, 2, 3] * u.TeV, offset=[[1]] * u.deg)
+ assert value.shape == (1, 3)
+ assert_allclose(value, 0.1 * u.deg)
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 5
} | 0.19 | {
"env_vars": null,
"env_yml_path": [
"environment-dev.yml"
],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": null,
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | accessible-pygments @ file:///home/conda/feedstock_root/build_artifacts/accessible-pygments_1679583834850/work
aiofiles @ file:///home/conda/feedstock_root/build_artifacts/aiofiles_1664378549280/work
aiosqlite @ file:///home/conda/feedstock_root/build_artifacts/aiosqlite_1715928379913/work
alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1673645646525/work
algopy @ file:///home/conda/feedstock_root/build_artifacts/algopy_1720279197995/work
anyio @ file:///home/conda/feedstock_root/build_artifacts/anyio_1688651106312/work/dist
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1692818318753/work
argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1649500320262/work
arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1662382474514/work
arviz @ file:///home/conda/feedstock_root/build_artifacts/arviz_1657673190308/work
astroid @ file:///croot/astroid_1676904296642/work
astropy @ file:///home/conda/feedstock_root/build_artifacts/astropy_1636583255099/work
astropy-healpix @ file:///home/conda/feedstock_root/build_artifacts/astropy-healpix_1663292471788/work
astropy-sphinx-theme @ file:///home/conda/feedstock_root/build_artifacts/astropy-sphinx-theme_1692310954826/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1722977137225/work
Babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1702422572539/work
backcall @ file:///home/conda/feedstock_root/build_artifacts/backcall_1592338393461/work
backports.functools-lru-cache @ file:///home/conda/feedstock_root/build_artifacts/backports.functools_lru_cache_1702571698061/work
backports.zoneinfo @ file:///home/conda/feedstock_root/build_artifacts/backports.zoneinfo_1655749744050/work
beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1705564648255/work
black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1666773063432/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bleach_1696630167146/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1648883617327/work
cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1725278078093/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1666183775483/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1663606412550/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1728479282467/work
click @ file:///home/conda/feedstock_root/build_artifacts/click_1651215140632/work
cmarkgfm @ file:///home/conda/feedstock_root/build_artifacts/cmarkgfm_1649209341849/work
codecov @ file:///home/conda/feedstock_root/build_artifacts/codecov_1681778020913/work
codespell @ file:///home/conda/feedstock_root/build_artifacts/codespell_1686768213814/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1666700638685/work
comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1710320294760/work
corner @ file:///home/conda/feedstock_root/build_artifacts/corner_1680889686676/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1638217612102/work
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography_1666563371538/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1635519461629/work
Cython @ file:///home/conda/feedstock_root/build_artifacts/cython_1659101942790/work
debugpy @ file:///home/conda/feedstock_root/build_artifacts/debugpy_1660619049122/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1641555617451/work
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
dill @ file:///home/conda/feedstock_root/build_artifacts/dill_1706434688412/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1651057213811/work
emcee @ file:///home/conda/feedstock_root/build_artifacts/emcee_1713796893786/work
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1643888246732/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1720869315914/work
execnet @ file:///home/conda/feedstock_root/build_artifacts/execnet_1688933357483/work
fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1718477020893/work/dist
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1659645013175/work
fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1638810296540/work/dist
-e git+https://github.com/gammapy/gammapy.git@e9464236ac26fcec555425d947b18daceffd567a#egg=gammapy
h5py @ file:///home/conda/feedstock_root/build_artifacts/h5py_1660488104552/work
healpy @ file:///home/conda/feedstock_root/build_artifacts/healpy_1658630659780/work
hypothesis @ file:///home/conda/feedstock_root/build_artifacts/hypothesis_1666731800085/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1726459485162/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
iminuit @ file:///home/conda/feedstock_root/build_artifacts/iminuit_1664395652684/work
importlib-metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1653252814274/work
importlib-resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1688813467203/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1673103042956/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1666723258080/work
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1651240553635/work
ipython_genutils @ file:///home/conda/feedstock_root/build_artifacts/ipython_genutils_1716278396992/work
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1724334859652/work
isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1638811571363/work/dist
isort @ file:///home/conda/feedstock_root/build_artifacts/isort_1675293796116/work
jaraco.classes @ file:///home/conda/feedstock_root/build_artifacts/jaraco.classes_1712041970955/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1696326070614/work
jeepney @ file:///home/conda/feedstock_root/build_artifacts/jeepney_1649085214306/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1715127149914/work
json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1712986206667/work
jsonpointer==2.0
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema-meta_1669810440410/work
jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1725037521377/work
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1676328545892/work
jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/jupyter_events_1690301630599/work
jupyter-server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1671591499479/work
jupyter-ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_ydoc_1685535850115/work/dist
jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1673615989977/work
jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1658332345782/work
jupyter_server_fileid @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_fileid_1714390608391/work
jupyter_server_ydoc @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_ydoc_1678043727957/work
jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1724937868967/work
jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1700744013163/work
jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server_1690205927615/work
jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1724331334887/work
keyring @ file:///home/conda/feedstock_root/build_artifacts/keyring_1663458641609/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1657953088445/work
lazy-object-proxy @ file:///home/conda/feedstock_root/build_artifacts/lazy-object-proxy_1649033171084/work
LHAPDF==6.5.0
markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1677100944732/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1648737551960/work
matplotlib @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-suite_1636786662993/work
matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1713250518406/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1643049622439/work
mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1704317613764/work
mistune @ file:///home/conda/feedstock_root/build_artifacts/mistune_1698947099619/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1690211628840/work
mypy-extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1675543315189/work
naima @ file:///home/conda/feedstock_root/build_artifacts/naima_1732787501953/work
nbclassic @ file:///home/conda/feedstock_root/build_artifacts/nbclassic_1716838762700/work
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1665125402713/work
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/nbconvert-meta_1687202153002/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1679336765223/work
nbsphinx @ file:///home/conda/feedstock_root/build_artifacts/nbsphinx_1741075436613/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1705850609492/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1663234155476/work
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1715848908871/work
notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1707957777232/work
numdifftools @ file:///home/conda/feedstock_root/build_artifacts/numdifftools_1676321814026/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1649806299270/work
numpydoc @ file:///home/conda/feedstock_root/build_artifacts/numpydoc_1665273484262/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1696202382185/work
pandas==1.3.5
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1712320355065/work
pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1702249949303/work
patsy @ file:///home/conda/feedstock_root/build_artifacts/patsy_1704469236901/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1706113125309/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1602536217715/work
Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1660385854171/work
pkginfo @ file:///home/conda/feedstock_root/build_artifacts/pkginfo_1709561090460/work
pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1694617248815/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1699715570510/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1648772594554/work
ply @ file:///home/conda/feedstock_root/build_artifacts/ply_1712242996588/work
prometheus-client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1689032443210/work
prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1727341649933/work
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1666155398032/work
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1609419310487/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
py @ file:///home/conda/feedstock_root/build_artifacts/py_1636301881863/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1659638152915/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/pycparser_1636257122734/work
pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1662443068984/work
pydata-sphinx-theme==0.13.3
pydocstyle @ file:///home/conda/feedstock_root/build_artifacts/pydocstyle_1672787369895/work
pyerfa @ file:///home/conda/feedstock_root/build_artifacts/pyerfa_1649586111662/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1659210156976/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1700607939962/work
pylint @ file:///home/conda/feedstock_root/build_artifacts/pylint_1678161361281/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1724616129934/work
PyQt5==5.15.7
PyQt5-sip==12.11.0
pyrsistent @ file:///home/conda/feedstock_root/build_artifacts/pyrsistent_1649013358450/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1648857264451/work
pytest==7.4.4
pytest-arraydiff @ file:///home/conda/feedstock_root/build_artifacts/pytest-arraydiff_1701092558977/work
pytest-astropy @ file:///home/conda/feedstock_root/build_artifacts/pytest-astropy_1698141266110/work
pytest-astropy-header @ file:///home/conda/feedstock_root/build_artifacts/pytest-astropy-header_1685485884985/work
pytest-asyncio==0.21.2
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1684964868191/work
pytest-doctestplus @ file:///home/conda/feedstock_root/build_artifacts/pytest-doctestplus_1691778055549/work
pytest-filter-subpackage @ file:///home/conda/feedstock_root/build_artifacts/pytest-filter-subpackage_1692364988795/work
pytest-mock @ file:///home/conda/feedstock_root/build_artifacts/pytest-mock_1686913567133/work
pytest-remotedata @ file:///home/conda/feedstock_root/build_artifacts/pytest-remotedata_1695733477631/work
pytest-runner @ file:///home/conda/feedstock_root/build_artifacts/pytest-runner_1646158889426/work
pytest-sphinx==0.5.0
pytest-xdist @ file:///home/conda/feedstock_root/build_artifacts/pytest-xdist_1700592942746/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1709299778482/work
python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1726055524169/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1648757092905/work
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1663830492333/work
readme-renderer @ file:///home/conda/feedstock_root/build_artifacts/readme_renderer_1667319054060/work
regions @ file:///home/conda/feedstock_root/build_artifacts/regions_1645553587317/work
reproject @ file:///home/conda/feedstock_root/build_artifacts/reproject_1636666879871/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1716354486713/work
requests-toolbelt @ file:///home/conda/feedstock_root/build_artifacts/requests-toolbelt_1682953341151/work
rfc3339-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1638811747357/work
rfc3986 @ file:///home/conda/feedstock_root/build_artifacts/rfc3986_1641825045899/work
rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work
rich @ file:///home/conda/feedstock_root/build_artifacts/rich_1726066019428/work/dist
ruamel.yaml @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml_1649033206568/work
ruamel.yaml.clib @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml.clib_1649013068865/work
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy_1637806658031/work
SecretStorage @ file:///home/conda/feedstock_root/build_artifacts/secretstorage_1660605887060/work
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1712584999685/work
setuptools-scm @ file:///home/conda/feedstock_root/build_artifacts/setuptools_scm_1657572406202/work
sip @ file:///home/conda/feedstock_root/build_artifacts/sip_1665592359543/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1620240208055/work
sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1708952932303/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
sortedcontainers @ file:///home/conda/feedstock_root/build_artifacts/sortedcontainers_1621217038088/work
soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1658207591808/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1648404928645/work
sphinx-astropy @ file:///home/conda/feedstock_root/build_artifacts/sphinx-astropy_1686422056269/work
sphinx-automodapi @ file:///home/conda/feedstock_root/build_artifacts/sphinx-automodapi_1692304502184/work
sphinx-click @ file:///home/conda/feedstock_root/build_artifacts/sphinx-click_1715836231309/work
sphinx-copybutton @ file:///home/conda/feedstock_root/build_artifacts/sphinx-copybutton_1681468139876/work
sphinx-gallery @ file:///home/conda/feedstock_root/build_artifacts/sphinx-gallery_1700542355088/work
sphinx-panels @ file:///home/conda/feedstock_root/build_artifacts/sphinx-panels_1629306343569/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1674487779667/work
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1675256494457/work
sphinxcontrib-jquery @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jquery_1678808969227/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1691604704163/work
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1649380998999/work
statsmodels @ file:///home/conda/feedstock_root/build_artifacts/statsmodels_1654787101575/work
terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1670253674810/work
tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1604308577558/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1727974628237/work
tomlkit @ file:///home/conda/feedstock_root/build_artifacts/tomlkit_1715185399719/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1656937818679/work
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1732497199771/work
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1675110562325/work
twine @ file:///home/conda/feedstock_root/build_artifacts/twine_1669898575620/work
typed-ast @ file:///home/conda/feedstock_root/build_artifacts/typed-ast_1653226021340/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/typing_extensions_1688315532570/work
uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1688655812972/work/dist
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1708239446578/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1699959196938/work
webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1723294704277/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1694681268211/work
websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1687789148259/work
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1724331337528/work
wrapt @ file:///home/conda/feedstock_root/build_artifacts/wrapt_1651495229974/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1639125986756/work
xarray-einstats @ file:///home/conda/feedstock_root/build_artifacts/xarray-einstats_1648967091347/work
y-py @ file:///home/conda/feedstock_root/build_artifacts/y-py_1658953063597/work
yamllint @ file:///home/conda/feedstock_root/build_artifacts/yamllint_1684844455576/work
ypy-websocket @ file:///home/conda/feedstock_root/build_artifacts/ypy-websocket_1670333059911/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1677313463193/work
| name: gammapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- accessible-pygments=0.0.4=pyhd8ed1ab_0
- aiofiles=22.1.0=pyhd8ed1ab_0
- aiosqlite=0.20.0=pyhd8ed1ab_0
- alabaster=0.7.13=pyhd8ed1ab_0
- algopy=0.6.1=pyhd8ed1ab_0
- alsa-lib=1.2.8=h166bdaf_0
- anyio=3.7.1=pyhd8ed1ab_0
- argon2-cffi=23.1.0=pyhd8ed1ab_0
- argon2-cffi-bindings=21.2.0=py37h540881e_2
- arrow=1.2.3=pyhd8ed1ab_0
- arviz=0.12.1=pyhd8ed1ab_1
- astroid=2.14.2=py37h06a4308_0
- astropy=4.3.1=py37hb1e94ed_2
- astropy-base=4.3.1=hda767d9_0
- astropy-healpix=0.7=py37hc105733_0
- astropy-sphinx-theme=1.1=pyhd8ed1ab_0
- attr=2.5.1=h166bdaf_1
- attrs=24.2.0=pyh71513ae_0
- babel=2.14.0=pyhd8ed1ab_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_4
- backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0
- backports.zoneinfo=0.2.1=py37h540881e_5
- beautifulsoup4=4.12.3=pyha770c72_0
- black=22.10.0=py37h89c1867_1
- bleach=6.1.0=pyhd8ed1ab_0
- brotli-python=1.0.9=py37hd23a5d3_7
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cairo=1.16.0=ha61ee94_1012
- certifi=2024.8.30=pyhd8ed1ab_0
- cffi=1.15.1=py37h43b0acd_1
- cfitsio=4.1.0=hd9d235c_0
- cftime=1.6.2=py37hc105733_0
- charset-normalizer=3.4.0=pyhd8ed1ab_0
- click=8.1.3=py37h89c1867_0
- cmarkgfm=0.8.0=py37h540881e_1
- codecov=2.1.13=pyhd8ed1ab_0
- codespell=2.2.5=pyhd8ed1ab_0
- colorama=0.4.6=pyhd8ed1ab_0
- comm=0.2.2=pyhd8ed1ab_0
- corner=2.2.2=pyhd8ed1ab_0
- coverage=6.2=py37h5e8e339_0
- cryptography=38.0.2=py37h5994e8b_1
- curl=8.1.2=h409715c_0
- cycler=0.11.0=pyhd8ed1ab_0
- cython=0.29.32=py37hd23a5d3_0
- dbus=1.13.6=h5008d03_3
- debugpy=1.6.3=py37hd23a5d3_0
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- dill=0.3.8=pyhd8ed1ab_0
- docutils=0.17.1=py37h89c1867_2
- emcee=3.1.6=pyhd8ed1ab_0
- entrypoints=0.4=pyhd8ed1ab_0
- exceptiongroup=1.2.2=pyhd8ed1ab_0
- execnet=2.0.2=pyhd8ed1ab_0
- expat=2.7.0=h5888daf_0
- fftw=3.3.10=nompi_hf1063bd_110
- flake8=5.0.4=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.15.0=h7e30c49_1
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fqdn=1.5.1=pyhd8ed1ab_0
- freetype=2.13.3=h48d6fc4_0
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- glib=2.84.0=h07242d1_0
- glib-tools=2.84.0=h4833e2c_0
- graphite2=1.3.13=h59595ed_1003
- gst-plugins-base=1.22.0=h4243ec0_2
- gstreamer=1.22.0=h25f0c4b_2
- gstreamer-orc=0.4.41=h17648ed_0
- h5py=3.7.0=nompi_py37hf1ce037_101
- harfbuzz=6.0.0=h8e241bc_0
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.2=nompi_h4df4325_101
- healpy=1.16.1=py37hc950561_0
- hepmc2=2.06.11=h5888daf_3
- hypothesis=6.56.3=py37h89c1867_1
- icu=70.1=h27087fc_0
- idna=3.10=pyhd8ed1ab_0
- imagesize=1.4.1=pyhd8ed1ab_0
- iminuit=2.17.0=py37hd23a5d3_0
- importlib-metadata=4.11.4=py37h89c1867_0
- importlib_metadata=4.11.4=hd8ed1ab_0
- importlib_resources=6.0.0=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_0
- ipykernel=6.16.2=pyh210e3f2_0
- ipython=7.33.0=py37h89c1867_0
- ipython_genutils=0.2.0=pyhd8ed1ab_1
- ipywidgets=8.1.5=pyhd8ed1ab_0
- isoduration=20.11.0=pyhd8ed1ab_0
- isort=5.11.5=pyhd8ed1ab_0
- jack=1.9.22=h11f4161_0
- jaraco.classes=3.4.0=pyhd8ed1ab_0
- jedi=0.19.1=pyhd8ed1ab_0
- jeepney=0.8.0=pyhd8ed1ab_0
- jinja2=3.1.4=pyhd8ed1ab_0
- jpeg=9e=h0b41bf4_3
- json5=0.9.25=pyhd8ed1ab_0
- jsonpointer=2.0=py_0
- jsonschema=4.17.3=pyhd8ed1ab_0
- jsonschema-with-format-nongpl=4.17.3=pyhd8ed1ab_0
- jupyter=1.1.1=pyhd8ed1ab_0
- jupyter_client=7.4.9=pyhd8ed1ab_0
- jupyter_console=6.5.1=pyhd8ed1ab_0
- jupyter_core=4.11.1=py37h89c1867_0
- jupyter_events=0.6.3=pyhd8ed1ab_1
- jupyter_server=1.23.4=pyhd8ed1ab_0
- jupyter_server_fileid=0.9.2=pyhd8ed1ab_0
- jupyter_server_ydoc=0.8.0=pyhd8ed1ab_0
- jupyter_ydoc=0.2.4=pyhd8ed1ab_0
- jupyterlab=3.6.8=pyhd8ed1ab_0
- jupyterlab_pygments=0.3.0=pyhd8ed1ab_0
- jupyterlab_server=2.24.0=pyhd8ed1ab_0
- jupyterlab_widgets=3.0.13=pyhd8ed1ab_0
- keyring=23.9.3=py37h89c1867_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.4=py37h7cecad7_0
- krb5=1.20.1=h81ceb04_0
- lame=3.100=h166bdaf_1003
- lazy-object-proxy=1.7.1=py37h540881e_1
- lcms2=2.14=h6ed2654_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- lhapdf=6.5.0=py37h7cecad7_1
- libaec=1.1.3=h59595ed_0
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libblas=3.9.0=20_linux64_openblas
- libcap=2.67=he9d0100_0
- libcblas=3.9.0=20_linux64_openblas
- libclang=15.0.7=default_h127d8a8_5
- libclang13=15.0.7=default_h5d6823c_5
- libcups=2.3.3=h36d4200_3
- libcurl=8.1.2=h409715c_0
- libdb=6.2.32=h9c3ff4c_0
- libdeflate=1.14=h166bdaf_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.10=h28343ad_4
- libexpat=2.7.0=h5888daf_0
- libffi=3.4.6=h2dba641_1
- libflac=1.4.3=h59595ed_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgcrypt=1.11.0=ha770c72_2
- libgcrypt-devel=1.11.0=hb9d3cd8_2
- libgcrypt-lib=1.11.0=hb9d3cd8_2
- libgcrypt-tools=1.11.0=hb9d3cd8_2
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.84.0=h2ff4ddf_0
- libgomp=14.2.0=h767d61c_2
- libgpg-error=1.51=hbd13f7d_1
- libiconv=1.18=h4ce23a2_1
- liblapack=3.9.0=20_linux64_openblas
- libllvm15=15.0.7=hadd5161_1
- libltdl=2.4.3a=h5888daf_0
- liblzma=5.6.4=hb9d3cd8_0
- liblzma-devel=5.6.4=hb9d3cd8_0
- libnetcdf=4.8.1=nompi_h261ec11_106
- libnghttp2=1.58.0=h47da74e_0
- libnsl=2.0.1=hd590300_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.25=pthreads_h413a1c8_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.47=h943b412_0
- libpq=15.3=hbcd7760_1
- libsndfile=1.2.2=hc60ed4a_1
- libsodium=1.0.18=h36c2ea0_1
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.0=h0841786_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libsystemd0=253=h8c4010b_1
- libtiff=4.4.0=h82bc61c_5
- libtool=2.5.4=h5888daf_0
- libudev1=253=h0b41bf4_1
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.5.0=h79f4944_1
- libxml2=2.10.3=hca2bb57_4
- libzip=1.10.1=h2629f0a_3
- libzlib=1.3.1=hb9d3cd8_2
- lz4-c=1.9.4=hcb278e6_0
- markdown-it-py=2.2.0=pyhd8ed1ab_0
- markupsafe=2.1.1=py37h540881e_1
- matplotlib=3.4.3=py37h89c1867_2
- matplotlib-base=3.4.3=py37h1058ff1_2
- matplotlib-inline=0.1.7=pyhd8ed1ab_0
- mccabe=0.7.0=pyhd8ed1ab_0
- mdurl=0.1.2=pyhd8ed1ab_0
- mistune=3.0.2=pyhd8ed1ab_0
- more-itertools=10.0.0=pyhd8ed1ab_0
- mpg123=1.32.9=hc50e24c_0
- mypy_extensions=1.0.0=pyha770c72_0
- mysql-common=8.0.33=hf1915f5_6
- mysql-libs=8.0.33=hca2cd23_6
- naima=0.10.0=pyhd8ed1ab_3
- nbclassic=1.1.0=pyhd8ed1ab_0
- nbclient=0.7.0=pyhd8ed1ab_0
- nbconvert=7.6.0=pyhd8ed1ab_0
- nbconvert-core=7.6.0=pyhd8ed1ab_0
- nbconvert-pandoc=7.6.0=pyhd8ed1ab_0
- nbformat=5.8.0=pyhd8ed1ab_0
- nbsphinx=0.9.7=pyhd8ed1ab_0
- ncurses=6.5=h2d0b736_3
- nest-asyncio=1.6.0=pyhd8ed1ab_0
- netcdf4=1.6.1=nompi_py37hb61e06c_100
- notebook=6.5.7=pyha770c72_0
- notebook-shim=0.2.4=pyhd8ed1ab_0
- nspr=4.36=h5888daf_0
- nss=3.110=h159eef7_0
- numdifftools=0.9.41=pyhd8ed1ab_0
- numpy=1.21.6=py37h976b520_0
- numpydoc=1.5.0=pyhd8ed1ab_0
- openjpeg=2.5.0=h7d73246_1
- openssl=3.1.8=h7b32b05_0
- packaging=23.2=pyhd8ed1ab_0
- pandas=1.3.5=py37he8f5f7f_0
- pandoc=3.6.4=ha770c72_0
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.8.4=pyhd8ed1ab_0
- pathspec=0.12.1=pyhd8ed1ab_0
- patsy=0.5.6=pyhd8ed1ab_0
- pcre2=10.44=hba22ea6_2
- pexpect=4.9.0=pyhd8ed1ab_0
- pickleshare=0.7.5=py_1003
- pillow=9.2.0=py37h850a105_2
- pip=24.0=pyhd8ed1ab_0
- pixman=0.44.2=h29eaf8c_0
- pkginfo=1.10.0=pyhd8ed1ab_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_1
- platformdirs=4.0.0=pyhd8ed1ab_0
- pluggy=1.0.0=py37h89c1867_3
- ply=3.11=pyhd8ed1ab_2
- prometheus_client=0.17.1=pyhd8ed1ab_0
- prompt-toolkit=3.0.48=pyha770c72_0
- prompt_toolkit=3.0.48=hd8ed1ab_1
- psutil=5.9.3=py37h540881e_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd3deb0d_0
- pulseaudio=16.1=hcb278e6_3
- pulseaudio-client=16.1=h5195f5e_3
- pulseaudio-daemon=16.1=ha8d29e2_3
- py=1.11.0=pyh6c4a22f_0
- pycodestyle=2.9.1=pyhd8ed1ab_0
- pycparser=2.21=pyhd8ed1ab_0
- pydantic=1.10.2=py37h540881e_0
- pydata-sphinx-theme=0.13.3=pyhd8ed1ab_0
- pydocstyle=6.2.0=pyhd8ed1ab_0
- pyerfa=2.0.0.1=py37hda87dfa_2
- pyflakes=2.5.0=pyhd8ed1ab_0
- pygments=2.17.2=pyhd8ed1ab_0
- pylint=2.16.4=pyhd8ed1ab_0
- pyparsing=3.1.4=pyhd8ed1ab_0
- pyqt=5.15.7=py37hf30b843_1
- pyqt5-sip=12.11.0=py37hd23a5d3_1
- pyrsistent=0.18.1=py37h540881e_1
- pysocks=1.7.1=py37h89c1867_5
- pytest-arraydiff=0.6.1=pyhd8ed1ab_0
- pytest-astropy=0.11.0=pyhd8ed1ab_0
- pytest-astropy-header=0.2.2=pyhd8ed1ab_0
- pytest-cov=4.1.0=pyhd8ed1ab_0
- pytest-doctestplus=1.0.0=pyhd8ed1ab_0
- pytest-filter-subpackage=0.1.2=pyhd8ed1ab_0
- pytest-mock=3.11.1=pyhd8ed1ab_0
- pytest-remotedata=0.4.1=pyhd8ed1ab_0
- pytest-runner=6.0.0=pyhd8ed1ab_0
- pytest-xdist=3.5.0=pyhd8ed1ab_0
- python=3.7.12=hf930737_100_cpython
- python-dateutil=2.9.0=pyhd8ed1ab_0
- python-fastjsonschema=2.20.0=pyhd8ed1ab_0
- python-json-logger=2.0.7=pyhd8ed1ab_0
- python_abi=3.7=4_cp37m
- pytz=2024.2=pyhd8ed1ab_0
- pyyaml=6.0=py37h540881e_4
- pyzmq=24.0.1=py37h0c0c2a8_0
- qt-main=5.15.8=h5d23da1_6
- readline=8.2=h8c095d6_2
- readme_renderer=37.3=pyhd8ed1ab_0
- regions=0.5=py37h5e8e339_1
- reproject=0.8=py37hb1e94ed_2
- requests=2.32.2=pyhd8ed1ab_0
- requests-toolbelt=1.0.0=pyhd8ed1ab_0
- rfc3339-validator=0.1.4=pyhd8ed1ab_0
- rfc3986=2.0.0=pyhd8ed1ab_0
- rfc3986-validator=0.1.1=pyh9f0ad1d_0
- rich=13.8.1=pyhd8ed1ab_0
- ruamel.yaml=0.17.21=py37h540881e_1
- ruamel.yaml.clib=0.2.6=py37h540881e_1
- scipy=1.7.3=py37hf2a6cf1_0
- secretstorage=3.3.3=py37h89c1867_0
- send2trash=1.8.3=pyh0d859eb_0
- setuptools=59.8.0=py37h89c1867_1
- setuptools-scm=7.0.5=pyhd8ed1ab_0
- setuptools_scm=7.0.5=hd8ed1ab_1
- sherpa=2.2.16=h8953d3f_3
- sip=6.7.2=py37hd23a5d3_0
- six=1.16.0=pyh6c4a22f_0
- sniffio=1.3.1=pyhd8ed1ab_0
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- sortedcontainers=2.4.0=pyhd8ed1ab_0
- soupsieve=2.3.2.post1=pyhd8ed1ab_0
- sphinx=4.5.0=pyh6c4a22f_0
- sphinx-astropy=1.9.1=pyhd8ed1ab_0
- sphinx-automodapi=0.16.0=pyh6ff6d48_0
- sphinx-click=6.0.0=pyhd8ed1ab_0
- sphinx-copybutton=0.5.2=pyhd8ed1ab_0
- sphinx-gallery=0.15.0=pyhd8ed1ab_0
- sphinx-panels=0.6.0=pyhd8ed1ab_0
- sphinxcontrib-applehelp=1.0.4=pyhd8ed1ab_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=2.0.1=pyhd8ed1ab_0
- sphinxcontrib-jquery=4.1=pyhd8ed1ab_0
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.5=pyhd8ed1ab_2
- sqlite=3.49.1=h9eae976_2
- statsmodels=0.13.2=py37hda87dfa_0
- terminado=0.17.1=pyh41d4057_0
- tinycss2=1.4.0=pyhd8ed1ab_0
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_0
- tomli=2.0.2=pyhd8ed1ab_0
- tomlkit=0.12.5=pyha770c72_0
- tornado=6.2=py37h540881e_0
- tqdm=4.67.1=pyhd8ed1ab_0
- traitlets=5.9.0=pyhd8ed1ab_0
- twine=4.0.2=pyhd8ed1ab_0
- typed-ast=1.5.4=py37h540881e_0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2025b=h78e105d_0
- uri-template=1.3.0=pyhd8ed1ab_0
- urllib3=2.2.1=pyhd8ed1ab_0
- wcwidth=0.2.10=pyhd8ed1ab_0
- webcolors=24.8.0=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_2
- websocket-client=1.6.1=pyhd8ed1ab_0
- wheel=0.42.0=pyhd8ed1ab_0
- widgetsnbextension=4.0.13=pyhd8ed1ab_0
- wrapt=1.14.1=py37h540881e_0
- xarray=0.20.2=pyhd8ed1ab_0
- xarray-einstats=0.2.2=pyhd8ed1ab_0
- xcb-util=0.4.0=h516909a_0
- xcb-util-image=0.4.0=h166bdaf_0
- xcb-util-keysyms=0.4.0=h516909a_0
- xcb-util-renderutil=0.3.9=h166bdaf_0
- xcb-util-wm=0.4.1=h516909a_0
- xkeyboard-config=2.38=h0b41bf4_0
- xorg-kbproto=1.0.7=hb9d3cd8_1003
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.4=h0b41bf4_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=hb9d3cd8_1003
- xorg-xextproto=7.3.0=hb9d3cd8_1004
- xorg-xproto=7.0.31=hb9d3cd8_1008
- xz=5.6.4=hbcc6ac9_0
- xz-gpl-tools=5.6.4=hbcc6ac9_0
- xz-tools=5.6.4=hb9d3cd8_0
- y-py=0.5.4=py37hbd0741f_0
- yaml=0.2.5=h7f98852_2
- yamllint=1.32.0=pyhd8ed1ab_0
- ypy-websocket=0.8.2=pyhd8ed1ab_0
- zeromq=4.3.5=h59595ed_1
- zipp=3.15.0=pyhd8ed1ab_0
- zlib=1.3.1=hb9d3cd8_2
- zstd=1.5.7=hb8e6e7a_2
- pip:
- gammapy==0.20.dev469+ge9464236a
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-sphinx==0.5.0
variables:
PYTHONNOUSERSITE: '1'
prefix: /opt/conda/envs/gammapy
| [
"gammapy/irf/tests/test_rad_max.py::test_rad_max_single_bin"
] | [] | [
"gammapy/irf/tests/test_rad_max.py::test_rad_max_roundtrip",
"gammapy/irf/tests/test_rad_max.py::test_rad_max_from_irf"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,334 | 3,005 | [
"gammapy/data/event_list.py",
"gammapy/irf/core.py",
"gammapy/irf/rad_max.py",
"gammapy/makers/utils.py",
"gammapy/utils/interpolation.py"
] |
canonical__charmcraft-698 | e0b3280684d8ab96959f0b96190e42a2f26a061a | 2022-03-04 00:43:24 | fb50a098dafb4878c7e6aa6562240afd1a936f4e | diff --git a/charmcraft/commands/store/__init__.py b/charmcraft/commands/store/__init__.py
index cd187442..e1589462 100644
--- a/charmcraft/commands/store/__init__.py
+++ b/charmcraft/commands/store/__init__.py
@@ -922,9 +922,13 @@ def _get_lib_info(*, full_name=None, lib_path=None):
# dir and Python extension.
# e.g.: charms.mycharm.v4.foo -> lib/charms/mycharm/v4/foo.py
try:
- charmsdir, importable_charm_name, v_api, libfile = full_name.split(".")
+ charmsdir, charm_name, v_api, libfile = full_name.split(".")
except ValueError:
raise _BadLibraryNameError(full_name)
+
+ # the lib full_name includes the charm_name which might not be importable (dashes)
+ importable_charm_name = create_importable_name(charm_name)
+
if charmsdir != "charms":
raise _BadLibraryNameError(full_name)
path = pathlib.Path("lib")
| Running `charmcraft fetch-lib` on a charm with dashes should fetch to a path with underscores
If you have a library for a charm (such as nginx-ingress-integrator) that has dashes in the name, and run `charmcraft fetch-lib charms.nginx-ingress-integrator.v0.charms` it should still fetch to a path on disk that has underscores rather than dashes, so it's importable. | canonical/charmcraft | diff --git a/tests/commands/test_store_commands.py b/tests/commands/test_store_commands.py
index 76f1309a..a80936ea 100644
--- a/tests/commands/test_store_commands.py
+++ b/tests/commands/test_store_commands.py
@@ -2469,6 +2469,13 @@ def test_getlibinfo_bad_name(name):
)
+def test_getlibinfo_not_importable_charm_name():
+ """Libraries should be save under importable paths."""
+ lib_data = _get_lib_info(full_name="charms.operator-libs-linux.v0.apt")
+ assert lib_data.charm_name == "operator-libs-linux"
+ assert lib_data.path == pathlib.Path("lib/charms/operator_libs_linux/v0/apt.py")
+
+
@pytest.mark.skipif(sys.platform == "win32", reason="Windows not [yet] supported")
@pytest.mark.parametrize(
"path",
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r requirements-dev.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libffi-dev libapt-pkg-dev libssl-dev"
],
"python": "3.8",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==21.4.0
black==22.1.0
certifi==2021.10.8
cffi==1.15.0
-e git+https://github.com/canonical/charmcraft.git@e0b3280684d8ab96959f0b96190e42a2f26a061a#egg=charmcraft
charset-normalizer==2.0.12
click==8.0.4
coverage==6.3.2
craft-cli==0.2.0
craft-parts==1.2.0
craft-providers==1.0.4
craft-store==2.0.1
cryptography==3.4
Deprecated==1.2.13
exceptiongroup==1.2.2
flake8==4.0.1
humanize==4.0.0
idna==3.3
importlib-metadata==4.11.2
importlib-resources==5.4.0
iniconfig==1.1.1
jeepney==0.7.1
Jinja2==3.0.3
jsonschema==4.4.0
keyring==23.5.0
macaroonbakery==1.3.1
MarkupSafe==2.1.0
mccabe==0.6.1
mypy-extensions==0.4.3
ops==1.3.0
overrides==6.1.0
packaging==21.3
pathspec==0.9.0
platformdirs==2.5.1
pluggy==1.0.0
protobuf==3.19.4
py==1.11.0
pycodestyle==2.8.0
pycparser==2.21
pydantic==1.8.2
pydantic-yaml==0.6.1
pydocstyle==6.1.1
pyflakes==2.4.0
pymacaroons==0.13.0
PyNaCl==1.5.0
pyparsing==3.0.7
pyRFC3339==1.1
pyrsistent==0.18.1
pytest==7.0.1
python-dateutil==2.8.2
pytz==2021.3
pyxdg==0.27
PyYAML==6.0
requests==2.27.1
requests-toolbelt==0.9.1
requests-unixsocket==0.3.0
responses==0.18.0
SecretStorage==3.3.1
semantic-version==2.9.0
semver==3.0.0.dev3
setuptools-rust==1.1.2
six==1.16.0
snap-helpers==0.2.0
snowballstemmer==2.2.0
tabulate==0.8.9
tomli==2.0.1
types-Deprecated==1.2.5
typing-utils==0.1.0
typing_extensions==4.1.1
urllib3==1.26.8
wrapt==1.13.3
zipp==3.7.0
| name: charmcraft
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==21.4.0
- black==22.1.0
- certifi==2021.10.8
- cffi==1.15.0
- charmcraft==1.5.0+2.ge0b32806
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.3.2
- craft-cli==0.2.0
- craft-parts==1.2.0
- craft-providers==1.0.4
- craft-store==2.0.1
- cryptography==3.4
- deprecated==1.2.13
- exceptiongroup==1.2.2
- flake8==4.0.1
- humanize==4.0.0
- idna==3.3
- importlib-metadata==4.11.2
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jeepney==0.7.1
- jinja2==3.0.3
- jsonschema==4.4.0
- keyring==23.5.0
- macaroonbakery==1.3.1
- markupsafe==2.1.0
- mccabe==0.6.1
- mypy-extensions==0.4.3
- ops==1.3.0
- overrides==6.1.0
- packaging==21.3
- pathspec==0.9.0
- platformdirs==2.5.1
- pluggy==1.0.0
- protobuf==3.19.4
- py==1.11.0
- pycodestyle==2.8.0
- pycparser==2.21
- pydantic==1.8.2
- pydantic-yaml==0.6.1
- pydocstyle==6.1.1
- pyflakes==2.4.0
- pymacaroons==0.13.0
- pynacl==1.5.0
- pyparsing==3.0.7
- pyrfc3339==1.1
- pyrsistent==0.18.1
- pytest==7.0.1
- python-dateutil==2.8.2
- pytz==2021.3
- pyxdg==0.27
- pyyaml==6.0
- requests==2.27.1
- requests-toolbelt==0.9.1
- requests-unixsocket==0.3.0
- responses==0.18.0
- secretstorage==3.3.1
- semantic-version==2.9.0
- semver==3.0.0.dev3
- setuptools-rust==1.1.2
- six==1.16.0
- snap-helpers==0.2.0
- snowballstemmer==2.2.0
- tabulate==0.8.9
- tomli==2.0.1
- types-deprecated==1.2.5
- typing-extensions==4.1.1
- typing-utils==0.1.0
- urllib3==1.26.8
- wrapt==1.13.3
- zipp==3.7.0
prefix: /opt/conda/envs/charmcraft
| [
"tests/commands/test_store_commands.py::test_getlibinfo_not_importable_charm_name"
] | [
"tests/commands/test_store_commands.py::test_createlib_path_can_not_write"
] | [
"tests/commands/test_store_commands.py::test_get_name_from_metadata_ok",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_no_file",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_garbage",
"tests/commands/test_store_commands.py::test_get_name_from_metadata_bad_content_no_name",
"tests/commands/test_store_commands.py::test_login_simple",
"tests/commands/test_store_commands.py::test_login_exporting",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[charm]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[bundle]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[permission]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[channel]",
"tests/commands/test_store_commands.py::test_login_restrictions_without_export[ttl]",
"tests/commands/test_store_commands.py::test_login_restricting_ttl",
"tests/commands/test_store_commands.py::test_login_restricting_channels",
"tests/commands/test_store_commands.py::test_login_restricting_permissions",
"tests/commands/test_store_commands.py::test_login_restricting_permission_invalid",
"tests/commands/test_store_commands.py::test_login_restricting_charms",
"tests/commands/test_store_commands.py::test_login_restricting_bundles",
"tests/commands/test_store_commands.py::test_login_restriction_mix",
"tests/commands/test_store_commands.py::test_logout",
"tests/commands/test_store_commands.py::test_logout_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami",
"tests/commands/test_store_commands.py::test_whoami_but_not_logged_in",
"tests/commands/test_store_commands.py::test_whoami_with_channels",
"tests/commands/test_store_commands.py::test_whoami_with_charms",
"tests/commands/test_store_commands.py::test_whoami_with_bundles",
"tests/commands/test_store_commands.py::test_whoami_comprehensive",
"tests/commands/test_store_commands.py::test_register_charm_name",
"tests/commands/test_store_commands.py::test_register_bundle_name",
"tests/commands/test_store_commands.py::test_list_registered_empty",
"tests/commands/test_store_commands.py::test_list_registered_one_private",
"tests/commands/test_store_commands.py::test_list_registered_one_public",
"tests/commands/test_store_commands.py::test_list_registered_several",
"tests/commands/test_store_commands.py::test_get_name_bad_zip",
"tests/commands/test_store_commands.py::test_get_name_charm_ok",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[=]",
"tests/commands/test_store_commands.py::test_get_name_charm_bad_metadata[foo:",
"tests/commands/test_store_commands.py::test_get_name_bundle_ok",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[=]",
"tests/commands/test_store_commands.py::test_get_name_bundle_bad_data[foo:",
"tests/commands/test_store_commands.py::test_get_name_nor_charm_nor_bundle",
"tests/commands/test_store_commands.py::test_upload_parameters_filepath_type",
"tests/commands/test_store_commands.py::test_upload_call_ok",
"tests/commands/test_store_commands.py::test_upload_call_error",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release",
"tests/commands/test_store_commands.py::test_upload_call_ok_including_release_multiple",
"tests/commands/test_store_commands.py::test_upload_including_release_with_resources",
"tests/commands/test_store_commands.py::test_upload_options_resource",
"tests/commands/test_store_commands.py::test_upload_call_error_including_release",
"tests/commands/test_store_commands.py::test_upload_charm_with_init_template_todo_token",
"tests/commands/test_store_commands.py::test_upload_with_different_name_than_in_metadata",
"tests/commands/test_store_commands.py::test_revisions_simple",
"tests/commands/test_store_commands.py::test_revisions_empty",
"tests/commands/test_store_commands.py::test_revisions_ordered_by_revision",
"tests/commands/test_store_commands.py::test_revisions_version_null",
"tests/commands/test_store_commands.py::test_revisions_errors_simple",
"tests/commands/test_store_commands.py::test_revisions_errors_multiple",
"tests/commands/test_store_commands.py::test_release_simple_ok",
"tests/commands/test_store_commands.py::test_release_simple_multiple_channels",
"tests/commands/test_store_commands.py::test_release_including_resources",
"tests/commands/test_store_commands.py::test_release_options_resource",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs0-expected_parsed0]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs1-expected_parsed1]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs2-expected_parsed2]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs3-expected_parsed3]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs4-expected_parsed4]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs5-expected_parsed5]",
"tests/commands/test_store_commands.py::test_release_parameters_ok[sysargs6-expected_parsed6]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs0]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs1]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs2]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs3]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs4]",
"tests/commands/test_store_commands.py::test_release_parameters_bad[sysargs5]",
"tests/commands/test_store_commands.py::test_close_simple_ok",
"tests/commands/test_store_commands.py::test_status_simple_ok",
"tests/commands/test_store_commands.py::test_status_empty",
"tests/commands/test_store_commands.py::test_status_channels_not_released_with_fallback",
"tests/commands/test_store_commands.py::test_status_channels_not_released_without_fallback",
"tests/commands/test_store_commands.py::test_status_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_tracks_order",
"tests/commands/test_store_commands.py::test_status_with_one_branch",
"tests/commands/test_store_commands.py::test_status_with_multiple_branches",
"tests/commands/test_store_commands.py::test_status_with_resources",
"tests/commands/test_store_commands.py::test_status_with_resources_missing_after_closed_channel",
"tests/commands/test_store_commands.py::test_status_with_resources_and_branches",
"tests/commands/test_store_commands.py::test_status_multiplebases_single_track",
"tests/commands/test_store_commands.py::test_status_multiplebases_multiple_tracks",
"tests/commands/test_store_commands.py::test_status_multiplebases_everything_combined",
"tests/commands/test_store_commands.py::test_status_with_base_in_none",
"tests/commands/test_store_commands.py::test_status_ureleased_track",
"tests/commands/test_store_commands.py::test_createlib_simple",
"tests/commands/test_store_commands.py::test_createlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_createlib_name_contains_dash",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo.bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[foo/bar]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[Foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[123foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[_foo]",
"tests/commands/test_store_commands.py::test_createlib_invalid_name[]",
"tests/commands/test_store_commands.py::test_createlib_path_already_there",
"tests/commands/test_store_commands.py::test_createlib_library_template_is_python",
"tests/commands/test_store_commands.py::test_publishlib_simple",
"tests/commands/test_store_commands.py::test_publishlib_contains_dash",
"tests/commands/test_store_commands.py::test_publishlib_all",
"tests/commands/test_store_commands.py::test_publishlib_not_found",
"tests/commands/test_store_commands.py::test_publishlib_not_from_current_charm",
"tests/commands/test_store_commands.py::test_publishlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_publishlib_store_is_advanced",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_ok",
"tests/commands/test_store_commands.py::test_publishlib_store_is_exactly_behind_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_is_too_behind",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_same_hash",
"tests/commands/test_store_commands.py::test_publishlib_store_has_same_revision_other_hash",
"tests/commands/test_store_commands.py::test_getlibinfo_success_simple",
"tests/commands/test_store_commands.py::test_getlibinfo_success_content",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.v3.testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[charms.testcharm.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_name[mycharms.testcharm.v2.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.html]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/v3/testlib.]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[charms/testcharm/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_path[mycharms/testcharm/v2/testlib.py]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-three.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.v-3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.3.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_bad_api[charms.testcharm.vX.testlib]",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_name",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_library_from_path",
"tests/commands/test_store_commands.py::test_getlibinfo_malformed_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_missing_metadata_field",
"tests/commands/test_store_commands.py::test_getlibinfo_api_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_api_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_not_int",
"tests/commands/test_store_commands.py::test_getlibinfo_patch_negative",
"tests/commands/test_store_commands.py::test_getlibinfo_api_patch_both_zero",
"tests/commands/test_store_commands.py::test_getlibinfo_metadata_api_different_path_api",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_string",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_non_ascii",
"tests/commands/test_store_commands.py::test_getlibinfo_libid_empty",
"tests/commands/test_store_commands.py::test_fetchlib_simple_downloaded",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name",
"tests/commands/test_store_commands.py::test_fetchlib_simple_dash_in_name_on_disk",
"tests/commands/test_store_commands.py::test_fetchlib_simple_updated",
"tests/commands/test_store_commands.py::test_fetchlib_all",
"tests/commands/test_store_commands.py::test_fetchlib_store_not_found",
"tests/commands/test_store_commands.py::test_fetchlib_store_is_old",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_same_hash",
"tests/commands/test_store_commands.py::test_fetchlib_store_same_versions_different_hash",
"tests/commands/test_store_commands.py::test_listlib_simple",
"tests/commands/test_store_commands.py::test_listlib_charm_from_metadata",
"tests/commands/test_store_commands.py::test_listlib_name_from_metadata_problem",
"tests/commands/test_store_commands.py::test_listlib_empty",
"tests/commands/test_store_commands.py::test_listlib_properly_sorted",
"tests/commands/test_store_commands.py::test_resources_simple",
"tests/commands/test_store_commands.py::test_resources_empty",
"tests/commands/test_store_commands.py::test_resources_ordered_and_grouped",
"tests/commands/test_store_commands.py::test_uploadresource_options_filepath_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_image_type",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_good_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs0]",
"tests/commands/test_store_commands.py::test_uploadresource_options_bad_combinations[sysargs1]",
"tests/commands/test_store_commands.py::test_uploadresource_filepath_call_ok",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_already_uploaded",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_upload_from_local",
"tests/commands/test_store_commands.py::test_uploadresource_image_call_missing_everywhere",
"tests/commands/test_store_commands.py::test_uploadresource_call_error",
"tests/commands/test_store_commands.py::test_resourcerevisions_simple",
"tests/commands/test_store_commands.py::test_resourcerevisions_empty",
"tests/commands/test_store_commands.py::test_resourcerevisions_ordered_by_revision"
] | [] | Apache License 2.0 | 12,344 | 262 | [
"charmcraft/commands/store/__init__.py"
] |
|
open-dicom__dicom_parser-93 | ec61d42824b8da34bc3b0feb212df6d65575e46c | 2022-03-04 08:50:02 | 33bd29439165255af061d3690113e252f23e7111 | diff --git a/src/dicom_parser/messages.py b/src/dicom_parser/messages.py
index 48bd241..604293b 100644
--- a/src/dicom_parser/messages.py
+++ b/src/dicom_parser/messages.py
@@ -16,5 +16,6 @@ UNREGISTERED_MODALITY: str = (
"No sequence identifiers registered for {modality}!"
)
MISSING_HEADER_INFO: str = "No header information found for {modality} sequence detection using {keys}!"
+MISSING_SERIES_SOURCE: str = "Series instances must be initialized with a path or an iterable of image instances!"
# flake8: noqa: E501
diff --git a/src/dicom_parser/series.py b/src/dicom_parser/series.py
index e2f3692..7915b69 100644
--- a/src/dicom_parser/series.py
+++ b/src/dicom_parser/series.py
@@ -2,10 +2,11 @@
Definition of the :class:`Series` class.
"""
from pathlib import Path
-from typing import Any, Generator, Tuple
+from typing import Any, Generator, Iterable, Optional, Tuple
import numpy as np
+from dicom_parser import messages
from dicom_parser.image import Image
from dicom_parser.messages import (
EMPTY_SERIES_DIRECTORY,
@@ -22,25 +23,43 @@ class Series:
from a single directory and ordered by InstanceNumber.
"""
- def __init__(self, path: Path, mime: bool = False):
+ def __init__(
+ self,
+ path: Optional[Path] = None,
+ images: Optional[Iterable[Image]] = None,
+ mime: bool = False,
+ ):
"""
The Series class should be initialized with a string or a
:class:`~pathlib.Path` instance representing the path of single
- `DICOM series`_.
+ `DICOM series`_, or an iterable or :class:`dicom_parser.image.Image`
+ instances (one of *path* or *images* must be provided).
.. _DICOM series:
https://dcm4che.atlassian.net/wiki/spaces/d2/pages/1835038/A+Very+Basic+DICOM+Introduction
Parameters
----------
- path : :class:`~pathlib.Path` or str
- Directory containing .dcm files.
+ path : :class:`~pathlib.Path` or str, optional
+ Directory containing .dcm files
+ images : Iterable[Image], optional
+ Image instances that make up the series
mime : bool, optional
Whether to find DICOM images by file mime type instead of
extension, defaults to False
"""
- self.path = self.check_path(path)
- self.images = self.get_images(mime)
+ # Find images in series directory path, if provided.
+ if isinstance(path, (Path, str)):
+ self.path = self.check_path(path)
+ self.images = self.get_images(mime)
+ # Tupelize any iterable of images.
+ elif images is not None:
+ self.images = tuple(images)
+ # Otherwise, raise an exception.
+ else:
+ raise ValueError(messages.MISSING_SERIES_SOURCE)
+
+ # Pixel array cache.
self._data = None
def __len__(self) -> int:
| Enable instantiating a Series using an iterable of Image instances
Instead of only allowing a path, enable passing an iterable of `Image` instances directly. Images should share the *SeriesInstanceUID* and be sorted by *InstanceNumber*. | open-dicom/dicom_parser | diff --git a/tests/test_series.py b/tests/test_series.py
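A rough sketch of the requested usage; the module paths mirror the patch above, and the directory path is a placeholder:

```python
from pathlib import Path

from dicom_parser.image import Image
from dicom_parser.series import Series

# Instead of Series("/path/to/series"), pass the Image instances directly.
# Per the issue, they are expected to share a SeriesInstanceUID and be
# ordered by InstanceNumber.
images = [Image(dcm) for dcm in Path("/path/to/series").rglob("*.dcm")]
series = Series(images=images)
```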
index 8e6e840..25419c9 100644
--- a/tests/test_series.py
+++ b/tests/test_series.py
@@ -168,3 +168,13 @@ class SeriesTestCase(TestCase):
series = Series(TEST_SERIES_PATH)
value = series.spatial_resolution
self.assertTupleEqual(value, SERIES_SPATIAL_RESOLUTION)
+
+ def test_series_from_images(self):
+ images = [Image(dcm) for dcm in Path(TEST_SERIES_PATH).rglob("*.dcm")]
+ from_images = Series(images=images)
+ from_path = Series(TEST_SERIES_PATH)
+ self.assertEqual(len(from_images), len(from_path))
+
+ def test_series_init_without_path_or_images_raises_valueerror(self):
+ with self.assertRaises(ValueError):
+ Series()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"coverage",
"tox-gh-actions"
],
"pre_install": [
"apt-get update",
"apt-get install -y libmagic1"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==5.5
-e git+https://github.com/open-dicom/dicom_parser.git@ec61d42824b8da34bc3b0feb212df6d65575e46c#egg=dicom_parser
distlib==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
numpy==1.26.4
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
py==1.11.0
pydicom==2.1.2
pyproject-api==1.9.0
pytest==6.2.5
python-magic==0.4.27
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.28.0
tox-gh-actions==3.3.0
typing_extensions==4.13.0
virtualenv==20.29.3
| name: dicom_parser
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==5.5
- dicom-parser==1.2.1
- distlib==0.3.9
- filelock==3.18.0
- numpy==1.26.4
- platformdirs==4.3.7
- py==1.11.0
- pydicom==2.1.2
- pyproject-api==1.9.0
- pytest==6.2.5
- python-magic==0.4.27
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.28.0
- tox-gh-actions==3.3.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/dicom_parser
| [
"tests/test_series.py::SeriesTestCase::test_series_from_images",
"tests/test_series.py::SeriesTestCase::test_series_init_without_path_or_images_raises_valueerror"
] | [] | [
"tests/test_series.py::SeriesTestCase::test_data_property",
"tests/test_series.py::SeriesTestCase::test_get_images_got_correct_number_of_images",
"tests/test_series.py::SeriesTestCase::test_get_method_with_missing_keyword",
"tests/test_series.py::SeriesTestCase::test_get_method_with_missing_keyword_and_default",
"tests/test_series.py::SeriesTestCase::test_get_method_with_multiple_values_keyword",
"tests/test_series.py::SeriesTestCase::test_get_method_with_multiple_values_tuple",
"tests/test_series.py::SeriesTestCase::test_get_method_with_single_value_keyword",
"tests/test_series.py::SeriesTestCase::test_get_method_with_single_value_tuple",
"tests/test_series.py::SeriesTestCase::test_get_spatial_resolution",
"tests/test_series.py::SeriesTestCase::test_get_spatial_resolution_without_slice_thickness",
"tests/test_series.py::SeriesTestCase::test_images_are_ordered_by_instance_number",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_int_returns_correct_instance",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_int_returns_image_instance",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_invalid_key_raises_key_error",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_invalid_type_raises_type_error",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_slice_returns_multiple_images",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_string",
"tests/test_series.py::SeriesTestCase::test_indexing_operator_with_tag_and_multiple_values",
"tests/test_series.py::SeriesTestCase::test_initialization_with_file_path_raises_value_error",
"tests/test_series.py::SeriesTestCase::test_initialization_with_invalid_path_raises_value_error",
"tests/test_series.py::SeriesTestCase::test_initialization_with_no_dcms_in_path_raises_file_not_found_error",
"tests/test_series.py::SeriesTestCase::test_initialization_with_pathlib_path",
"tests/test_series.py::SeriesTestCase::test_initialization_with_string_path",
"tests/test_series.py::SeriesTestCase::test_len",
"tests/test_series.py::SeriesTestCase::test_mosaic_series_data_same_as_nifti",
"tests/test_series.py::SeriesTestCase::test_mosaic_series_returns_as_4d",
"tests/test_series.py::SeriesTestCase::test_spatial_resolution"
] | [] | MIT License | 12,346 | 763 | [
"src/dicom_parser/messages.py",
"src/dicom_parser/series.py"
] |
|
Dorthu__openapi3-75 | 0e02198948258b69e666f53aa2b22fcf4da62f97 | 2022-03-04 16:40:29 | 7039a15e65477b505467b85bcd09215645f3ec9a | diff --git a/openapi3/openapi.py b/openapi3/openapi.py
index 5b79b14..0b2c753 100644
--- a/openapi3/openapi.py
+++ b/openapi3/openapi.py
@@ -101,16 +101,27 @@ class OpenAPI(ObjectBase):
node = self
for part in path:
+ part = part.replace('~1','/').replace('~0','~')
if isinstance(node, Map):
- if part not in node: # pylint: disable=unsupported-membership-test
+ try:
+ node = node[part]
+ except KeyError:
err_msg = "Invalid path {} in Reference".format(path)
raise ReferenceResolutionError(err_msg)
- node = node.get(part)
else:
- if not hasattr(node, part):
+ try:
+ ipart = int(part)
+ except ValueError:
+ pass
+ else:
+ if ipart>=0 and ipart<len(node):
+ node = node[ipart]
+ continue
+ try:
+ node = getattr(node, part)
+ except AttributeError:
err_msg = "Invalid path {} in Reference".format(path)
raise ReferenceResolutionError(err_msg)
- node = getattr(node, part)
return node
| Doesn't resolve JSON path escape codes
RFC 6901 says that `~1` must be de-escaped to `/` and `~0` to `~` (in this order). | Dorthu/openapi3
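A small illustration of why the order of the two replacements matters (de-escape `~1` first, then `~0`, exactly as the patch above does), using the `test~01three` case exercised in the test fixture below:

```python
part = "test~01three"  # an escaped segment referring to a schema named "test~1three"

# Correct order per RFC 6901: "~1" -> "/" first, then "~0" -> "~"
assert part.replace("~1", "/").replace("~0", "~") == "test~1three"

# Wrong order: "~0" -> "~" first turns "~01" into "~1", which then becomes "/"
assert part.replace("~0", "~").replace("~1", "/") == "test/three"
```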
index 5a10c1b..b4e83cf 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -178,3 +178,11 @@ def schema_without_properties():
Provides a spec that includes a schema with no properties defined
"""
yield _get_parsed_yaml("schema-without-properties.yaml")
+
+
[email protected]
+def rfc_6901():
+ """
+ Provides a spec that includes RFC 6901 escape codes in ref paths
+ """
+ yield _get_parsed_yaml("rfc_6901.yaml")
diff --git a/tests/fixtures/rfc_6901.yaml b/tests/fixtures/rfc_6901.yaml
new file mode 100644
index 0000000..3b85030
--- /dev/null
+++ b/tests/fixtures/rfc_6901.yaml
@@ -0,0 +1,79 @@
+# this schema has refs whose paths include the escaped `~` and `/` characters
+# (escaped as ~0 and ~1 respectively). This also purposefully includes the ~01
+# escape sequence to ensure parsing ends in `~1` and not `/`
+openapi: "3.1.0"
+info:
+ version: 1.0.0
+ title: RFC 6901 Test
+paths:
+ /ref-test:
+ parameters:
+ - $ref: '#/paths/~1parameters-holder/parameters/1'
+ get:
+ operationId: refTestGet
+ responses:
+ '200':
+ description: Test
+ content:
+ application/json:
+ schema:
+ description: |
+ References all other fields in components/schemas to ensure all references
+ are tested.
+ type: object
+ properties:
+ one:
+ $ref: '#/components/schemas/test~1one'
+ two:
+ $ref: '#/components/schemas/test~0two'
+ three:
+ $ref: '#/components/schemas/test~01three'
+ four:
+ $ref: '#/components/schemas/01/properties/example'
+ /parameters-holder:
+ parameters:
+ - name: example
+ in: query
+ schema:
+ type: int
+ - name: example2
+ in: query
+ schema:
+ type: int
+ get:
+ operationId: parametersHolderGet
+ responses:
+ '200':
+ description: Placeholder
+ content:
+ application/json:
+ schema:
+ type: object
+components:
+ schemas:
+ test/one:
+ description: |
+ Tests that refs can reference paths with a `/` character; this should be
+ escaped as `#/components/schemas/test~1one`
+ type: string
+ test~two:
+ description: |
+ Tests that refs can reference paths with a `~` character; this should be
+ escaped as `#/components/schemas/test~0two`
+ type: int
+ test~1three:
+ description: |
+ Tests that refs can reference paths with a ~1 sequence in them; this should
+ be escaped as `#/components/schemas/test~01three`
+ type: array
+ items:
+ type: string
+ '01':
+ description: |
+ Tests that paths parsed using integer-like segments are handled correctly.
+ This will be referenced as `#/components/schemas/0/properties/example`
+ type: object
+ properties:
+ example:
+ type: string
+ example: it worked
diff --git a/tests/ref_test.py b/tests/ref_test.py
index f7999be..3a15f5f 100644
--- a/tests/ref_test.py
+++ b/tests/ref_test.py
@@ -84,7 +84,7 @@ def test_ref_allof_handling(with_ref_allof):
spec = OpenAPI(with_ref_allof)
referenced_schema = spec.components.schemas['Example']
- # this should have only one property; the allOf from
+ # this should have only one property; the allOf from
# paths['/allof-example']get.responses['200'].content['application/json'].schema
# should not modify the component
assert len(referenced_schema.properties) == 1, \
@@ -92,3 +92,26 @@ def test_ref_allof_handling(with_ref_allof):
len(referenced_schema.properties),
", ".join(referenced_schema.properties.keys()),
)
+
+def test_ref_6901_refs(rfc_6901):
+ """
+ Tests that RFC 6901 escape codes, such as ~0 and ~1, are pared correctly
+ """
+ spec = OpenAPI(rfc_6901, validate=True)
+ assert len(spec.errors()) == 0, spec.errors()
+
+ # spec parsed, make sure our refs got the right values
+ path = spec.paths['/ref-test']
+ response = path.get.responses['200'].content['application/json'].schema
+
+ assert response.properties['one'].type == 'string'
+ assert response.properties['two'].type == 'int'
+ assert response.properties['three'].type == 'array'
+
+ # ensure the integer path components parsed as expected too
+ assert response.properties['four'].type == 'string'
+ assert response.properties['four'].example == 'it worked'
+
+ # ensure integer path parsing does work as expected
+ assert len(path.parameters) == 1
+ assert path.parameters[0].name == 'example2'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/Dorthu/openapi3.git@0e02198948258b69e666f53aa2b22fcf4da62f97#egg=openapi3
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
tomli==2.2.1
urllib3==2.3.0
| name: openapi3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- tomli==2.2.1
- urllib3==2.3.0
prefix: /opt/conda/envs/openapi3
| [
"tests/ref_test.py::test_ref_6901_refs"
] | [] | [
"tests/ref_test.py::test_ref_resolution",
"tests/ref_test.py::test_allOf_resolution",
"tests/ref_test.py::test_resolving_nested_allof_ref",
"tests/ref_test.py::test_ref_allof_handling"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,349 | 300 | [
"openapi3/openapi.py"
] |
|
algorand__pyteal-234 | f2598da3dec6041051df4442cd9cc7e3c373674d | 2022-03-05 00:18:42 | f2598da3dec6041051df4442cd9cc7e3c373674d | diff --git a/pyteal/compiler/subroutines.py b/pyteal/compiler/subroutines.py
index 1b5e63e..78bfa7a 100644
--- a/pyteal/compiler/subroutines.py
+++ b/pyteal/compiler/subroutines.py
@@ -94,7 +94,6 @@ def spillLocalSlotsDuringRecursion(
for subroutine, reentryPoints in recursivePoints.items():
slots = list(sorted(slot for slot in localSlots[subroutine]))
- numArgs = subroutine.argumentCount()
if len(reentryPoints) == 0 or len(slots) == 0:
# no need to spill slots
@@ -107,13 +106,26 @@ def spillLocalSlotsDuringRecursion(
before: List[TealComponent] = []
after: List[TealComponent] = []
- if len(reentryPoints.intersection(stmt.getSubroutines())) != 0:
+ calledSubroutines = stmt.getSubroutines()
+ # the only opcode that references subroutines is callsub, and it should only ever
+ # reference one subroutine at a time
+ assert (
+ len(calledSubroutines) <= 1
+ ), "Multiple subroutines are called from the same TealComponent"
+
+ reentrySubroutineCalls = list(reentryPoints.intersection(calledSubroutines))
+ if len(reentrySubroutineCalls) != 0:
# A subroutine is being called which may reenter the current subroutine, so insert
# ops to spill local slots to the stack before calling the subroutine and also to
# restore the local slots after returning from the subroutine. This prevents a
# reentry into the current subroutine from modifying variables we are currently
# using.
+ # reentrySubroutineCalls should have a length of 1, since calledSubroutines has a
+ # maximum length of 1
+ reentrySubroutineCall = reentrySubroutineCalls[0]
+ numArgs = reentrySubroutineCall.argumentCount()
+
digArgs = True
coverSpilledSlots = False
uncoverArgs = False
| Incorrect TEAL code produced for mutually recursive subroutines with different argument counts
## Summary
There is a bug in the implementation of [`spillLocalSlotsDuringRecursion`](https://github.com/algorand/pyteal/blob/f2598da3dec6041051df4442cd9cc7e3c373674d/pyteal/compiler/subroutines.py#L65-L204) that causes incorrect TEAL code to be generated for mutually recursive subroutines with different argument counts.
The problem is that the variable `numArgs` is the number of arguments of the calling subroutine, **NOT** the number of arguments in the called subroutine. This will cause incorrect slot spilling and restoring code to be generated, in some cases producing an infinite recursive loop.
## Example
Below is an example of a PyTeal that's affected by this bug:
```python
from pyteal import *
@Subroutine(TealType.uint64)
def factorial(i: Expr) -> Expr:
return If(i <= Int(1), Int(1), factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i)
@Subroutine(TealType.uint64)
def factorial_intermediate(i: Expr, j: Expr) -> Expr:
return Seq(Log(j), factorial(i))
program = Return(factorial(Int(4)) == Int(24))
```
It currently produces the following **incorrect** TEAL code (with PyTeal v0.10.0), with my commentary:
```
#pragma version 5
int 4
callsub factorial_0
int 24
==
return
// factorial
factorial_0:
store 0
load 0
int 1
<=
bnz factorial_0_l2
load 0
int 1
-
byte "inconsequential"
load 0
swap // the compiler thinks factorialintermediate_1 only takes 1 argument, but it takes 2. Only one value is uncovered
callsub factorialintermediate_1
swap
store 0
load 0
*
b factorial_0_l3
factorial_0_l2:
int 1
factorial_0_l3:
retsub
// factorial_intermediate
factorialintermediate_1:
store 2
store 1
load 2
log
load 1
load 1
load 2
uncover 3
uncover 3 // the compiler thinks factorial_0 takes 2 arguments, but it only takes 1. More values are uncovered than needed
callsub factorial_0
cover 2
store 2
store 1
retsub
``` | algorand/pyteal | diff --git a/pyteal/compiler/compiler_test.py b/pyteal/compiler/compiler_test.py
index 7a36fcf..74b5799 100644
--- a/pyteal/compiler/compiler_test.py
+++ b/pyteal/compiler/compiler_test.py
@@ -1153,7 +1153,7 @@ retsub
assert actual == expected
-def test_compile_subroutine_mutually_recursive():
+def test_compile_subroutine_mutually_recursive_4():
@Subroutine(TealType.uint64)
def isEven(i: Expr) -> Expr:
return If(i == Int(0), Int(1), Not(isOdd(i - Int(1))))
@@ -1285,6 +1285,147 @@ retsub
assert actual == expected
+def test_compile_subroutine_mutually_recursive_different_arg_count_4():
+ @Subroutine(TealType.uint64)
+ def factorial(i: Expr) -> Expr:
+ return If(
+ i <= Int(1),
+ Int(1),
+ factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i,
+ )
+
+ @Subroutine(TealType.uint64)
+ def factorial_intermediate(i: Expr, j: Expr) -> Expr:
+ return Seq(Pop(j), factorial(i))
+
+ program = Return(factorial(Int(4)) == Int(24))
+
+ expected = """#pragma version 4
+int 4
+callsub factorial_0
+int 24
+==
+return
+
+// factorial
+factorial_0:
+store 0
+load 0
+int 1
+<=
+bnz factorial_0_l2
+load 0
+int 1
+-
+byte "inconsequential"
+load 0
+dig 2
+dig 2
+callsub factorialintermediate_1
+swap
+store 0
+swap
+pop
+swap
+pop
+load 0
+*
+b factorial_0_l3
+factorial_0_l2:
+int 1
+factorial_0_l3:
+retsub
+
+// factorial_intermediate
+factorialintermediate_1:
+store 2
+store 1
+load 2
+pop
+load 1
+load 1
+load 2
+dig 2
+callsub factorial_0
+store 1
+store 2
+load 1
+swap
+store 1
+swap
+pop
+retsub
+ """.strip()
+ actual = compileTeal(program, Mode.Application, version=4, assembleConstants=False)
+ assert actual == expected
+
+
+def test_compile_subroutine_mutually_recursive_different_arg_count_5():
+ @Subroutine(TealType.uint64)
+ def factorial(i: Expr) -> Expr:
+ return If(
+ i <= Int(1),
+ Int(1),
+ factorial_intermediate(i - Int(1), Bytes("inconsequential")) * i,
+ )
+
+ @Subroutine(TealType.uint64)
+ def factorial_intermediate(i: Expr, j: Expr) -> Expr:
+ return Seq(Log(j), factorial(i))
+
+ program = Return(factorial(Int(4)) == Int(24))
+
+ expected = """#pragma version 5
+int 4
+callsub factorial_0
+int 24
+==
+return
+
+// factorial
+factorial_0:
+store 0
+load 0
+int 1
+<=
+bnz factorial_0_l2
+load 0
+int 1
+-
+byte "inconsequential"
+load 0
+cover 2
+callsub factorialintermediate_1
+swap
+store 0
+load 0
+*
+b factorial_0_l3
+factorial_0_l2:
+int 1
+factorial_0_l3:
+retsub
+
+// factorial_intermediate
+factorialintermediate_1:
+store 2
+store 1
+load 2
+log
+load 1
+load 1
+load 2
+uncover 2
+callsub factorial_0
+cover 2
+store 2
+store 1
+retsub
+ """.strip()
+ actual = compileTeal(program, Mode.Application, version=5, assembleConstants=False)
+ assert actual == expected
+
+
def test_compile_loop_in_subroutine():
@Subroutine(TealType.none)
def setState(value: Expr) -> Expr:
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
black==21.7b0
cffi==1.17.1
click==8.1.8
exceptiongroup==1.2.2
iniconfig==2.1.0
msgpack==1.1.0
mypy==0.931
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
pluggy==1.5.0
py-algorand-sdk==2.6.1
pycparser==2.22
pycryptodomex==3.22.0
PyNaCl==1.5.0
-e git+https://github.com/algorand/pyteal.git@f2598da3dec6041051df4442cd9cc7e3c373674d#egg=pyteal
pytest==8.3.5
pytest-timeout==2.3.1
regex==2024.11.6
tomli==1.2.3
typing_extensions==4.13.0
| name: pyteal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- black==21.7b0
- cffi==1.17.1
- click==8.1.8
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- msgpack==1.1.0
- mypy==0.931
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- pluggy==1.5.0
- py-algorand-sdk==2.6.1
- pycparser==2.22
- pycryptodomex==3.22.0
- pynacl==1.5.0
- pytest==8.3.5
- pytest-timeout==2.3.1
- regex==2024.11.6
- tomli==1.2.3
- typing-extensions==4.13.0
prefix: /opt/conda/envs/pyteal
| [
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_different_arg_count_4",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_different_arg_count_5"
] | [] | [
"pyteal/compiler/compiler_test.py::test_compile_single",
"pyteal/compiler/compiler_test.py::test_compile_sequence",
"pyteal/compiler/compiler_test.py::test_compile_branch",
"pyteal/compiler/compiler_test.py::test_compile_branch_multiple",
"pyteal/compiler/compiler_test.py::test_empty_branch",
"pyteal/compiler/compiler_test.py::test_compile_mode",
"pyteal/compiler/compiler_test.py::test_compile_version_invalid",
"pyteal/compiler/compiler_test.py::test_compile_version_2",
"pyteal/compiler/compiler_test.py::test_compile_version_default",
"pyteal/compiler/compiler_test.py::test_compile_version_3",
"pyteal/compiler/compiler_test.py::test_compile_version_4",
"pyteal/compiler/compiler_test.py::test_compile_version_5",
"pyteal/compiler/compiler_test.py::test_compile_version_6",
"pyteal/compiler/compiler_test.py::test_slot_load_before_store",
"pyteal/compiler/compiler_test.py::test_assign_scratch_slots",
"pyteal/compiler/compiler_test.py::test_scratchvar_double_assign_invalid",
"pyteal/compiler/compiler_test.py::test_assembleConstants",
"pyteal/compiler/compiler_test.py::test_compile_while",
"pyteal/compiler/compiler_test.py::test_compile_for",
"pyteal/compiler/compiler_test.py::test_compile_break",
"pyteal/compiler/compiler_test.py::test_compile_continue",
"pyteal/compiler/compiler_test.py::test_compile_continue_break_nested",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_unsupported",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_no_return",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_with_return",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_many_args",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_5",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_multiple_args",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_recursive_multiple_args_5",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_4",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_mutually_recursive_5",
"pyteal/compiler/compiler_test.py::test_compile_loop_in_subroutine",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_invalid_name",
"pyteal/compiler/compiler_test.py::test_compile_subroutine_assemble_constants",
"pyteal/compiler/compiler_test.py::test_compile_wide_ratio"
] | [] | MIT License | 12,351 | 484 | [
"pyteal/compiler/subroutines.py"
] |
|
karlicoss__promnesia-277 | 90ba0d15ac7d04b365363821adb0f722534086f3 | 2022-03-05 03:44:19 | 90ba0d15ac7d04b365363821adb0f722534086f3 | diff --git a/src/promnesia/cannon.py b/src/promnesia/cannon.py
index a3a303f..b76cb9f 100755
--- a/src/promnesia/cannon.py
+++ b/src/promnesia/cannon.py
@@ -105,11 +105,13 @@ default_qkeep = [
# TODO perhaps, decide if fragment is meaningful (e.g. wiki) or random sequence of letters?
class Spec(NamedTuple):
- qkeep : Optional[Collection[str]] = None
+ qkeep : Optional[Union[Collection[str], bool]] = None
qremove: Optional[Set[str]] = None
fkeep : bool = False
def keep_query(self, q: str) -> Optional[int]: # returns order
+ if self.qkeep is True:
+ return 1
qkeep = {
q: i for i, q in enumerate(chain(default_qkeep, self.qkeep or []))
}
@@ -183,6 +185,7 @@ specs: Dict[str, Spec] = {
'ycombinator.com' : S(qkeep={'id'}), # todo just keep id by default?
'play.google.com' : S(qkeep={'id'}),
'answers.yahoo.com' : S(qkeep={'qid'}),
+ 'isfdb.org': S(qkeep=True),
}
_def_spec = S()
@@ -271,7 +274,7 @@ def transform_split(split: SplitResult):
netloc = canonify_domain(split.netloc)
path = split.path
- qparts = parse_qsl(split.query)
+ qparts = parse_qsl(split.query, keep_blank_values=True)
fragment = split.fragment
@@ -319,7 +322,7 @@ def transform_split(split: SplitResult):
to = to + ('', )
(netloc, path, qq) = [t.format(**gd) for t in to]
- qparts.extend(parse_qsl(qq)) # TODO hacky..
+ qparts.extend(parse_qsl(qq, keep_blank_values=True)) # TODO hacky..
# TODO eh, qparts should really be a map or something...
break
| Handle query parameters without values
Right now, pages on isfdb like `http://www.isfdb.org/cgi-bin/title.cgi?2172` are canonicalized to `http://www.isfdb.org/cgi-bin/title.cgi`, which removes important info from the URL.
I made a somewhat hacky solution, but there is perhaps a more elegant way. The solution is in two parts:
First, modify `Spec` to accept `qkeep=True`, which is taken to mean "retain all query parameters", and have isfdb.org use that option.
```
@@ -105,11 +105,13 @@ default_qkeep = [
# TODO perhaps, decide if fragment is meaningful (e.g. wiki) or random sequence of letters?
class Spec(NamedTuple):
- qkeep : Optional[Collection[str]] = None
+ qkeep : Optional[Union[Collection[str], bool]] = None
qremove: Optional[Set[str]] = None
fkeep : bool = False
def keep_query(self, q: str) -> Optional[int]: # returns order
+ if self.qkeep is True:
+ return 1
qkeep = {
q: i for i, q in enumerate(chain(default_qkeep, self.qkeep or []))
}
@@ -183,6 +185,7 @@ specs: Dict[str, Spec] = {
'ycombinator.com' : S(qkeep={'id'}), # todo just keep id by default?
'play.google.com' : S(qkeep={'id'}),
'answers.yahoo.com' : S(qkeep={'qid'}),
+ 'isfdb.org': S(qkeep=True),
}
_def_spec = S()
```
Second, pass `keep_blank_values=True` to `parse_qsl`.
```
@@ -271,7 +274,7 @@ def transform_split(split: SplitResult):
netloc = canonify_domain(split.netloc)
path = split.path
- qparts = parse_qsl(split.query)
+ qparts = parse_qsl(split.query, keep_blank_values=True)
fragment = split.fragment
@@ -319,7 +322,7 @@ def transform_split(split: SplitResult):
to = to + ('', )
(netloc, path, qq) = [t.format(**gd) for t in to]
- qparts.extend(parse_qsl(qq)) # TODO hacky..
+ qparts.extend(parse_qsl(qq, keep_blank_values=True)) # TODO hacky..
# TODO eh, qparts should really be a map or something...
break
```
This achieves the desired result, but it's unpleasantly hacky.
First problem: this will not guarantee the order of query parameters. But is that important? Why doesn't the code just always alphabetize them? There's a note for youtube that "order matters here", but it seems to me that youtube doesn't care what order the parameters are in.
Second problem: maybe you usually don't want empty parameters kept. It could complicate things to add logic around this, but maybe something like a `keep_blank_values` property on Spec would do the job.
Third problem: it's not great to have to dig around in the program code to fix this behavior. Maybe it could be patched in from my `config.py`, once the supporting code is in place, with something like:
```python
from promnesia.cannon import specs, S
specs['isfdb.org'] = S(qkeep=True)
```
Really, all of `cannon.py` is a bit intimidating to modify (and, incidentally, shouldn't it be `canon.py`?). I'm not sure what's the right direction to go. | karlicoss/promnesia | diff --git a/tests/cannon.py b/tests/cannon.py
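With a change along these lines applied, the behaviour being asked for looks roughly like this (expected strings taken from the new tests below):

```python
from promnesia.cannon import canonify

# Empty-valued query parameters are kept (note the trailing '=')
assert canonify('http://www.isfdb.org/cgi-bin/title.cgi?2172') == 'isfdb.org/cgi-bin/title.cgi?2172='
assert canonify('https://news.ycombinator.com/item?id=') == 'news.ycombinator.com/item?id='
```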
index d76693b..5cd6ef5 100644
--- a/tests/cannon.py
+++ b/tests/cannon.py
@@ -294,3 +294,19 @@ def test_error():
with pytest.raises(CanonifyException):
# borrowed from https://bugs.mageia.org/show_bug.cgi?id=24640#c7
canonify('https://example.com\[email protected]')
+
[email protected]("url,expected", [
+ ('https://news.ycombinator.com/item?id=', 'news.ycombinator.com/item?id='),
+ ('https://www.youtube.com/watch?v=hvoQiF0kBI8&list&index=2',
+ 'youtube.com/watch?v=hvoQiF0kBI8&list='),
+])
+def test_empty_query_parameter(url, expected):
+ assert canonify(url) == expected
+
[email protected]("url,expected", [
+ ('http://www.isfdb.org/cgi-bin/title.cgi?2172', 'isfdb.org/cgi-bin/title.cgi?2172='),
+ ('http://www.isfdb.org/cgi-bin/title.cgi?2172+1', 'isfdb.org/cgi-bin/title.cgi?2172%201='),
+ ('http://www.isfdb.org/cgi-bin/title.cgi?2172&foo=bar&baz&quux', 'isfdb.org/cgi-bin/title.cgi?2172=&baz=&foo=bar&quux='),
+])
+def test_qkeep_true(url, expected):
+ assert canonify(url) == expected
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-xdist",
"pytest-cov",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alembic==1.15.2
appdirs==1.4.4
banal==1.0.6
beautifulsoup4==4.13.3
cachew==0.19.20250118
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
dataset==1.6.2
decorator==5.2.1
exceptiongroup==1.2.2
execnet==2.1.1
falcon==2.0.0
filelock==3.18.0
greenlet==3.1.1
HPI==0.5.20241019
hug==2.6.1
idna==3.10
iniconfig==2.1.0
kompress==0.2.20250118
logzero==1.7.0
lxml==5.3.1
Mako==1.3.9
MarkupSafe==3.0.2
mistletoe==1.4.0
more-itertools==10.6.0
orgparse==0.4.20231004
orjson==3.10.16
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/karlicoss/promnesia.git@90ba0d15ac7d04b365363821adb0f722534086f3#egg=promnesia
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-magic==0.4.27
pytz==2025.2
requests==2.32.3
soupsieve==2.6
SQLAlchemy==1.4.54
tomli==2.2.1
typing_extensions==4.13.0
tzlocal==5.3.1
uritools==4.0.3
urlextract==1.9.0
urllib3==2.3.0
| name: promnesia
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alembic==1.15.2
- appdirs==1.4.4
- banal==1.0.6
- beautifulsoup4==4.13.3
- cachew==0.19.20250118
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- dataset==1.6.2
- decorator==5.2.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- falcon==2.0.0
- filelock==3.18.0
- greenlet==3.1.1
- hpi==0.5.20241019
- hug==2.6.1
- idna==3.10
- iniconfig==2.1.0
- kompress==0.2.20250118
- logzero==1.7.0
- lxml==5.3.1
- mako==1.3.9
- markupsafe==3.0.2
- mistletoe==1.4.0
- more-itertools==10.6.0
- orgparse==0.4.20231004
- orjson==3.10.16
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-magic==0.4.27
- pytz==2025.2
- requests==2.32.3
- soupsieve==2.6
- sqlalchemy==1.4.54
- tomli==2.2.1
- typing-extensions==4.13.0
- tzlocal==5.3.1
- uritools==4.0.3
- urlextract==1.9.0
- urllib3==2.3.0
prefix: /opt/conda/envs/promnesia
| [
"tests/cannon.py::test_empty_query_parameter[https://news.ycombinator.com/item?id=-news.ycombinator.com/item?id=]",
"tests/cannon.py::test_empty_query_parameter[https://www.youtube.com/watch?v=hvoQiF0kBI8&list&index=2-youtube.com/watch?v=hvoQiF0kBI8&list=]",
"tests/cannon.py::test_qkeep_true[http://www.isfdb.org/cgi-bin/title.cgi?2172-isfdb.org/cgi-bin/title.cgi?2172=]",
"tests/cannon.py::test_qkeep_true[http://www.isfdb.org/cgi-bin/title.cgi?2172+1-isfdb.org/cgi-bin/title.cgi?2172%201=]",
"tests/cannon.py::test_qkeep_true[http://www.isfdb.org/cgi-bin/title.cgi?2172&foo=bar&baz&quux-isfdb.org/cgi-bin/title.cgi?2172=&baz=&foo=bar&quux=]"
] | [] | [
"tests/cannon.py::test_youtube[https://www.youtube.com/watch?t=491s&v=1NHbPN9pNPM&index=63&list=WL-youtube.com/watch?v=1NHbPN9pNPM&t=491s&list=WL]",
"tests/cannon.py::test_youtube[youtube.com/watch?v=wHrCkyoe72U&feature=share&time_continue=6-youtube.com/watch?v=wHrCkyoe72U]",
"tests/cannon.py::test_youtube[youtube.com/embed/nyc6RJEEe0U?feature=oembed-youtube.com/watch?v=nyc6RJEEe0U]",
"tests/cannon.py::test_youtube[https://youtu.be/iCvmsMzlF7o?list=WL-youtube.com/watch?v=iCvmsMzlF7o&list=WL]",
"tests/cannon.py::test_youtube[m.youtube.com/watch?v=Zn6gV2sdl38-youtube.com/watch?v=Zn6gV2sdl38]",
"tests/cannon.py::test_archiveorg[https://web.archive.org/web/20090902224414/http://reason.com/news/show/119237.html-reason.com/news/show/119237.html]",
"tests/cannon.py::test_hackernews[https://news.ycombinator.com/from?site=jacopo.io-jacopo.io]",
"tests/cannon.py::test_hackernews[https://news.ycombinator.com/item?id=25099862-news.ycombinator.com/item?id=25099862]",
"tests/cannon.py::test_reddit[https://www.reddit.com/r/firefox/comments/bbugc5/firefox_bans_free_speech_commenting_plugin/?ref=readnext-reddit.com/r/firefox/comments/bbugc5/firefox_bans_free_speech_commenting_plugin]",
"tests/cannon.py::test_reddit[https://www.reddit.com/r/selfhosted/comments/8j8mo3/what_are_you_self_hosting/dz19gh9/?utm_content=permalink&utm_medium=user&utm_source=reddit&utm_name=u_karlicoss-reddit.com/r/selfhosted/comments/8j8mo3/what_are_you_self_hosting/dz19gh9]",
"tests/cannon.py::test[https://github.com/search?o=asc&q=track&s=stars&type=Repositories-github.com/search?q=track]",
"tests/cannon.py::test[https://80000hours.org/career-decision/article/?utm_source=The+EA+Newsletter&utm_campaign=04ca3c2244-EMAIL_CAMPAIGN_2019_04_03_04_26&utm_medium=email&utm_term=0_51c1df13ac-04ca3c2244-318697649-80000hours.org/career-decision/article]",
"tests/cannon.py::test[https://www.facebook.com/photo.php?fbid=24147689823424326&set=pcb.2414778905423667&type=3&theater-facebook.com/photo.php?fbid=24147689823424326]",
"tests/cannon.py::test[https://play.google.com/store/apps/details?id=com.faultexception.reader&hl=en-play.google.com/store/apps/details?id=com.faultexception.reader]",
"tests/cannon.py::test[https://news.ycombinator.com/item?id=12172351-news.ycombinator.com/item?id=12172351]",
"tests/cannon.py::test[https://urbandictionary.com/define.php?term=Belgian%20Whistle-urbandictionary.com/define.php?term=Belgian%20Whistle]",
"tests/cannon.py::test[https://en.wikipedia.org/wiki/Dinic%27s_algorithm-en.wikipedia.org/wiki/Dinic%27s_algorithm]",
"tests/cannon.py::test[zoopla.co.uk/to-rent/details/42756337#D0zlBWeD4X85odsR.97-zoopla.co.uk/to-rent/details/42756337]",
"tests/cannon.py::test[withouthspec.co.uk/rooms/16867952?guests=2&adults=2&location=Berlin%2C+Germany&check_in=2017-08-16&check_out=2017-08-20-withouthspec.co.uk/rooms/16867952]",
"tests/cannon.py::test[amp.theguardian.com/technology/2017/oct/09/mark-zuckerberg-facebook-puerto-rico-virtual-reality-theguardian.com/technology/2017/oct/09/mark-zuckerberg-facebook-puerto-rico-virtual-reality]",
"tests/cannon.py::test[https://answers.yahoo.com/question/index?qid=20071101131442AAk9bGp-answers.yahoo.com/question/index?qid=20071101131442AAk9bGp]",
"tests/cannon.py::test[flowingdata.com/2010/12/14/10-best-data-visualization-projects-of-the-year-%e2%80%93-2010-flowingdata.com/2010/12/14/10-best-data-visualization-projects-of-the-year-%E2%80%93-2010]",
"tests/cannon.py::test[flowingdata.com/2010/12/14/10-best-data-visualization-projects-of-the-year-\\u2013-2010-flowingdata.com/2010/12/14/10-best-data-visualization-projects-of-the-year-%E2%80%93-2010]",
"tests/cannon.py::test[https://spoonuniversity.com/lifestyle/marmite-ways-to-eat-it&usg=AFQjCNH4s1SOEjlpENlfPV5nuvADZpSdow-spoonuniversity.com/lifestyle/marmite-ways-to-eat-it]",
"tests/cannon.py::test[https://google.co.uk/amp/s/amp.reddit.com/r/androidapps/comments/757e2t/swiftkey_or_gboard-reddit.com/r/androidapps/comments/757e2t/swiftkey_or_gboard]",
"tests/cannon.py::test[https://www.youtube.com/watch?v=hvoQiF0kBI8&list=WL&index=2-youtube.com/watch?v=hvoQiF0kBI8&list=WL]",
"tests/cannon.py::test[https://www.youtube.com/watch?list=WL&v=hvoQiF0kBI8&index=2-youtube.com/watch?v=hvoQiF0kBI8&list=WL]",
"tests/cannon.py::test[https://bbs.archlinux.org/viewtopic.php?id=212740-bbs.archlinux.org/viewtopic.php?id=212740]",
"tests/cannon.py::test[https://ubuntuforums.org/showthread.php?t=1403470&s=0dd67bdb12559c22e73a220752db50c7&p=8806195#post8806195-ubuntuforums.org/showthread.php?t=1403470&p=8806195]",
"tests/cannon.py::test[https://arstechnica.com/?p=1371299-arstechnica.com/?p=1371299]",
"tests/cannon.py::test_same_norm[urls0]",
"tests/cannon.py::test_same_norm[urls1]",
"tests/cannon.py::test_error"
] | [] | MIT License | 12,353 | 502 | [
"src/promnesia/cannon.py"
] |
|
opendatacube__datacube-core-1242 | 465c44edd6b50d14d388170805f397561cde3085 | 2022-03-08 02:40:08 | ecbab26ef501ceaff4c2d093d7e60b49bc4d764f | codecov[bot]: # [Codecov](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube) Report
> Merging [#1242](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube) (426e411) into [develop](https://codecov.io/gh/opendatacube/datacube-core/commit/2aee3af6da9e4843154efc695c22e35a1d66e704?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube) (2aee3af) will **decrease** coverage by `0.00%`.
> The diff coverage is `92.10%`.
[](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube)
```diff
@@ Coverage Diff @@
## develop #1242 +/- ##
===========================================
- Coverage 93.86% 93.86% -0.01%
===========================================
Files 109 109
Lines 10760 10772 +12
===========================================
+ Hits 10100 10111 +11
- Misses 660 661 +1
```
| [Impacted Files](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube) | Coverage Δ | |
|---|---|---|
| [datacube/utils/geometry/\_base.py](https://codecov.io/gh/opendatacube/datacube-core/pull/1242/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube#diff-ZGF0YWN1YmUvdXRpbHMvZ2VvbWV0cnkvX2Jhc2UucHk=) | `99.33% <92.10%> (-0.13%)` | :arrow_down: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube). Last update [465c44e...426e411](https://codecov.io/gh/opendatacube/datacube-core/pull/1242?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=opendatacube).
| diff --git a/datacube/utils/geometry/_base.py b/datacube/utils/geometry/_base.py
index 43454159..41b906ef 100644
--- a/datacube/utils/geometry/_base.py
+++ b/datacube/utils/geometry/_base.py
@@ -116,10 +116,25 @@ class BoundingBox(_BoundingBox):
(p1[1], p2[1]))
[email protected]({})
-def _make_crs(crs_str: str) -> Tuple[_CRS, Optional[int]]:
- crs = _CRS.from_user_input(crs_str)
- return (crs, crs.to_epsg())
+def _make_crs_key(crs_spec: Union[str, _CRS]) -> str:
+ if isinstance(crs_spec, str):
+ normed_epsg = crs_spec.upper()
+ if normed_epsg.startswith("EPSG:"):
+ return normed_epsg
+ return crs_spec
+ return crs_spec.to_wkt()
+
+
[email protected]({}, key=_make_crs_key)
+def _make_crs(crs: Union[str, _CRS]) -> Tuple[_CRS, str, Optional[int]]:
+ if isinstance(crs, str):
+ crs = _CRS.from_user_input(crs)
+ epsg = crs.to_epsg()
+ if epsg is not None:
+ crs_str = f"EPSG:{epsg}"
+ else:
+ crs_str = crs.to_wkt()
+ return (crs, crs_str, crs.to_epsg())
def _make_crs_transform_key(from_crs, to_crs, always_xy):
@@ -131,26 +146,6 @@ def _make_crs_transform(from_crs, to_crs, always_xy):
return Transformer.from_crs(from_crs, to_crs, always_xy=always_xy).transform
-def _guess_crs_str(crs_spec: Any) -> Optional[str]:
- """
- Returns a string representation of the crs spec.
- Returns `None` if it does not understand the spec.
- """
- if isinstance(crs_spec, str):
- return crs_spec
- if isinstance(crs_spec, dict):
- crs_spec = _CRS.from_dict(crs_spec)
-
- if hasattr(crs_spec, 'to_wkt'):
- return crs_spec.to_wkt()
- if hasattr(crs_spec, 'to_epsg'):
- epsg = crs_spec.to_epsg()
- if epsg is not None:
- return 'EPSG:{}'.format(crs_spec.to_epsg())
-
- return None
-
-
class CRS:
"""
Wrapper around `pyproj.CRS` for backwards compatibility.
@@ -160,20 +155,34 @@ class CRS:
__slots__ = ('_crs', '_epsg', '_str')
- def __init__(self, crs_str: Any):
+ def __init__(self, crs_spec: Any):
"""
:param crs_str: string representation of a CRS, often an EPSG code like 'EPSG:4326'
:raises: `pyproj.exceptions.CRSError`
"""
- crs_str = _guess_crs_str(crs_str)
- if crs_str is None:
- raise CRSError("Expect string or any object with `.to_epsg()` or `.to_wkt()` method")
-
- _crs, _epsg = _make_crs(crs_str)
-
- self._crs = _crs
- self._epsg = _epsg
- self._str = crs_str
+ if isinstance(crs_spec, str):
+ self._crs, self._str, self._epsg = _make_crs(crs_spec)
+ elif isinstance(crs_spec, CRS):
+ self._crs = crs_spec._crs
+ self._epsg = crs_spec._epsg
+ self._str = crs_spec._str
+ elif isinstance(crs_spec, _CRS):
+ self._crs, self._str, self._epsg = _make_crs(crs_spec)
+ elif isinstance(crs_spec, dict):
+ self._crs, self._str, self._epsg = _make_crs(_CRS.from_dict(crs_spec))
+ else:
+ _to_epsg = getattr(crs_spec, "to_epsg", None)
+ if _to_epsg is not None:
+ self._crs, self._str, self._epsg = _make_crs(f"EPSG:{_to_epsg()}")
+ return
+ _to_wkt = getattr(crs_spec, "to_wkt", None)
+ if _to_wkt is not None:
+ self._crs, self._str, self._epsg = _make_crs(_to_wkt())
+ return
+
+ raise CRSError(
+ "Expect string or any object with `.to_epsg()` or `.to_wkt()` methods"
+ )
def __getstate__(self):
return {'crs_str': self._str}
@@ -257,7 +266,7 @@ class CRS:
return self._str
def __hash__(self) -> int:
- return hash(self.to_wkt())
+ return hash(self._str)
def __repr__(self) -> str:
return "CRS('%s')" % self._str
@@ -987,7 +996,6 @@ class GeoBox:
"""
def __init__(self, width: int, height: int, affine: Affine, crs: MaybeCRS):
- assert is_affine_st(affine), "Only axis-aligned geoboxes are currently supported"
self.width = width
self.height = height
self.affine = affine
@@ -1110,6 +1118,7 @@ class GeoBox:
"""
dict of coordinate labels
"""
+ assert is_affine_st(self.affine), "Only axis-aligned geoboxes are currently supported"
yres, xres = self.resolution
yoff, xoff = self.affine.yoff, self.affine.xoff
| load_ard() task graph creation limited by GeoBox/CRS hash function performance
### Expected behaviour
When I run load_ard() on a large region (the Murray Darling Basin) using distributed dask, I would like to be able to create up to 500,000 tasks for my workflow. That is, fully utilise the dask scheduler’s capacity before I parallelise the problem across multiple dask clusters/tiles/time slices etc.
### Actual behaviour
On my simplified problem, using dask chunking (1x10240x10240) the production of the task graph (6144 tasks) in the distributed dask cluster takes around 16 seconds. On a real problem, I haven’t the patience to wait for it to finish.
As suggested in the dask manual, I have run the %prun over the persist function to see what is consuming the resources. The step that is taking most of the resources is the cull step of the task graph optimisation. The cull step identifies any tasks that don’t need to be executed and removes them from the task graph. When culling, dask stores the tasks that it identifies in a set. Python sets make use of object hashes to efficiently calculate which objects are in the intersection of those objects used vs those not used. However in the case of the simplified problem, calculating the hash of the CRS consumes most of the processing time (96%).
Why are the CRS hashes part of the task graph? As children of the task dc_load_fmask-to_float, two lazy functions (datacube.api.core.fuse_lazy) include a GeoBox as an argument. The GeoBox is a composite object that includes a CRS, so when the GeoBox is hashed, the hash of the CRS is required.
Why is CRS hashing slow? The Open Data Cube CRS is a wrapper around the pyproj CRS, which is a wrapper around PROJ. ODC treats a CRS as an immutable object defined by the value of its contents, so when calculating the hash it uses the hash of the WKT projection text. The underlying PROJ library can produce that text very quickly, but the pyproj call is much slower. I think it might be due to the overhead of transferring data from C++ to Python, or to the creation of a new proj_context for each new CRS object (to handle multi-threading issues).
The implementation seems well motivated, so I have not been able to come up with a solution. Perhaps, during the step that generates multiple GeoBox objects, the CRS should be eagerly converted to WKT for the purposes of the hash.
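A minimal sketch of that idea, as an illustration of the approach rather than datacube's actual code: compute the expensive string form once, at construction time, so that repeated `__hash__` calls never re-enter pyproj.
```python
from pyproj import CRS as _CRS


class CachedHashCRS:
    """Sketch: pay the pyproj/WKT cost once, up front."""
    __slots__ = ("_crs", "_str")

    def __init__(self, spec: str):
        self._crs = _CRS.from_user_input(spec)
        epsg = self._crs.to_epsg()
        # cache a cheap, stable string form at construction time
        self._str = f"EPSG:{epsg}" if epsg is not None else self._crs.to_wkt()

    def __hash__(self) -> int:
        return hash(self._str)  # no pyproj call on the hot path

    def __eq__(self, other) -> bool:
        return isinstance(other, CachedHashCRS) and self._str == other._str
```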
### Steps to reproduce the behaviour
I have attached a Jupyter notebook.
[geobox_hash_performance.zip](https://github.com/opendatacube/datacube-core/files/8077431/geobox_hash_performance.zip)
A collection of major lines are here:
```
query = {
# Site details
'latitude': (-24.5856075286866, -37.6786722912282),
'longitude': (138.566245999366, 152.491485595703),
'time': ('1988-01-01', '1988-02-29'),
'output_crs': 'EPSG:4326',
'resolution': (-0.0002698, 0.0002698)
}
dc= datacube.Datacube(app='dc-LS-extract')
# Read in Landsat images and bands
bands=['nbart_blue','nbart_green','nbart_red','nbart_nir','nbart_swir_1','nbart_swir_2']
chunks = {'time': 1, 'latitude': 512*2*10, 'longitude': 512*2*10}
ds = load_ard(dc=dc,
products=['ga_ls5t_ard_3','ga_ls7e_ard_3', 'ga_ls8c_ard_3'],
measurements=bands, **query, group_by='solar_day',
dask_chunks= chunks
)
%prun ds.nbart_blue.persist()
773424 function calls (646389 primitive calls) in 15.597 seconds
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
2664 15.096 0.006 15.128 0.006 {method 'to_wkt' of 'pyproj._crs.Base' objects}
52282/17594 0.056 0.000 3.474 0.000 utils.py:1669(stringify)
10315 0.042 0.000 10.197 0.001 core.py:159(keys_in_tasks)
3 0.035 0.012 5.145 1.715 optimization.py:429(fuse)
```
A small snippet to reproduce the pyproj performance (takes about 0.5 seconds):
```
from pyproj import CRS
from pyproj.enums import WktVersion
import timeit
crs = CRS.from_epsg(4326)
timeit.timeit(lambda:crs.to_wkt(),number=100)
```
A small C++ snippet that calls PROJ directly, showing the overhead is not in PROJ itself (takes 6 milliseconds):
```
#include <iostream>
#include <vector>
#include <string>
#include <chrono>
#include <proj.h>
#include <sqlite3.h>
using namespace std;
int main(void)
{
const char *wkt;
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
PJ_CONTEXT *m_ctxt = proj_context_create();
auto crs = proj_create_from_database(m_ctxt, "EPSG", "4326",
PJ_CATEGORY_CRS, false, nullptr);
const char *const options[] = {"MULTILINE=NO", nullptr};
for (int i = 0; i < 100; i++)
{
wkt = proj_as_wkt(m_ctxt, crs, PJ_WKT1_GDAL, options);
}
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
printf("%s", wkt);
std::cout << "Time difference = " << std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count() << " millisecond" << std::endl;
}
```
### Environment information
Open Data Cube core, version 1.8.6
pyproj info:
pyproj: 3.2.1
PROJ: 7.2.1
data dir: /usr/local/share/proj
user_data_dir: /home/jovyan/.local/share/proj
System:
python: 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
executable: /env/bin/python
machine: Linux-4.14.256-197.484.amzn2.x86_64-x86_64-with-glibc2.29
Python deps:
certifi: 2021.10.08
pip: 21.3.1
setuptools: 58.5.3
Cython: 0.29.24
| opendatacube/datacube-core | diff --git a/tests/test_geometry.py b/tests/test_geometry.py
index 1c989e20..235ba464 100644
--- a/tests/test_geometry.py
+++ b/tests/test_geometry.py
@@ -43,12 +43,12 @@ from datacube.utils.geometry._base import (
bounding_box_in_pixel_domain,
geobox_intersection_conservative,
geobox_union_conservative,
- _guess_crs_str,
force_2d,
_align_pix,
_round_to_res,
_norm_crs,
_norm_crs_or_error,
+ _make_crs_key,
)
from datacube.testutils.geom import (
epsg4326,
@@ -1475,9 +1475,9 @@ def test_crs_hash():
def test_base_internals():
- assert _guess_crs_str(CRS("epsg:3577")) == epsg3577.to_wkt()
+ assert _make_crs_key("epsg:3577") == "EPSG:3577"
no_epsg_crs = CRS(SAMPLE_WKT_WITHOUT_AUTHORITY)
- assert _guess_crs_str(no_epsg_crs) == no_epsg_crs.to_wkt()
+ assert _make_crs_key(no_epsg_crs.proj) == no_epsg_crs.proj.to_wkt()
gjson_bad = {'type': 'a', 'coordinates': [1, [2, 3, 4]]}
assert force_2d(gjson_bad) == {'type': 'a', 'coordinates': [1, [2, 3]]}
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.8 | {
"env_vars": null,
"env_yml_path": [
"conda-environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "environment.yml",
"pip_packages": null,
"pre_install": [
"apt-get update",
"apt-get install -y libgdal-dev libhdf5-serial-dev libnetcdf-dev"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | affine @ file:///home/conda/feedstock_root/build_artifacts/affine_1733762038348/work
alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1704848697227/work
astroid==3.3.9
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1741918516150/work
babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1738490167835/work
bokeh @ file:///home/conda/feedstock_root/build_artifacts/bokeh_1719324651922/work
boto3 @ file:///home/conda/feedstock_root/build_artifacts/boto3_1743235439640/work
botocore @ file:///home/conda/feedstock_root/build_artifacts/botocore_1743212919739/work
Bottleneck==1.4.2
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work
cachetools @ file:///home/conda/feedstock_root/build_artifacts/cachetools_1740094013202/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
cffi==1.17.1
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1725400455427/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work
ciso8601==2.3.2
click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work
click-plugins @ file:///home/conda/feedstock_root/build_artifacts/click-plugins_1733731077999/work
cligj @ file:///home/conda/feedstock_root/build_artifacts/cligj_1733749956636/work
cloudpickle @ file:///home/conda/feedstock_root/build_artifacts/cloudpickle_1736947526808/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work
commonmark==0.9.1
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1727293517607/work
coverage==7.8.0
cryptography==44.0.2
cytoolz @ file:///home/conda/feedstock_root/build_artifacts/cytoolz_1734107207199/work
dask @ file:///home/conda/feedstock_root/build_artifacts/dask-core_1722976580461/work
dask-expr @ file:///home/conda/feedstock_root/build_artifacts/dask-expr_1722982607046/work
-e git+https://github.com/opendatacube/datacube-core.git@465c44edd6b50d14d388170805f397561cde3085#egg=datacube
dill==0.3.9
distributed @ file:///home/conda/feedstock_root/build_artifacts/distributed_1722982528621/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1733217766141/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1743361113926/work
greenlet @ file:///home/conda/feedstock_root/build_artifacts/greenlet_1734532792566/work
hypothesis==6.130.5
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1736252299705/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work
isort==6.0.1
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work
jmespath @ file:///home/conda/feedstock_root/build_artifacts/jmespath_1733229141657/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1733472696581/work
jsonschema-specifications @ file:///tmp/tmpk0f344m9/src
lark-parser @ file:///home/conda/feedstock_root/build_artifacts/lark-parser_1725742324642/work
locket @ file:///home/conda/feedstock_root/build_artifacts/locket_1650660393415/work
lz4 @ file:///home/conda/feedstock_root/build_artifacts/lz4_1733474248677/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work
mccabe==0.7.0
moto==5.1.2
msgpack @ file:///home/conda/feedstock_root/build_artifacts/msgpack-python_1725975012026/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1733253078561/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1732314280888/work/dist/numpy-2.0.2-cp39-cp39-linux_x86_64.whl#sha256=62d98eb3da9f13e6b227c430d01026b7427f341b3fdcb838430f2a9e520417b1
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1736810577256/work
partd @ file:///home/conda/feedstock_root/build_artifacts/partd_1715026491486/work
pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1735929703139/work
pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1733344503739/work
platformdirs==4.3.7
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1740663125313/work
psycopg2 @ file:///home/conda/feedstock_root/build_artifacts/psycopg2-split_1727892820458/work
pyarrow==19.0.1
pyarrow-hotfix @ file:///home/conda/feedstock_root/build_artifacts/pyarrow-hotfix_1734380560621/work
pycodestyle==2.13.0
pycparser==2.22
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1736243443484/work
pylint==3.3.6
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1743089729650/work
pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1726679693937/work
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work
pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work
pytest-cov==6.0.0
pytest-runner @ file:///home/conda/feedstock_root/build_artifacts/pytest-runner_1646158889426/work
pytest-timeout==2.3.1
pytest_httpserver==1.1.2
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1706886791323/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work
rasterio @ file:///home/conda/feedstock_root/build_artifacts/rasterio_1733163417290/work
recommonmark==0.7.1
referencing @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_referencing_1737836872/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work
responses==0.25.7
rpds-py @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rpds-py_1743037693/work
s3transfer @ file:///home/conda/feedstock_root/build_artifacts/s3transfer_1741171990164/work
setuptools-scm @ file:///home/conda/feedstock_root/build_artifacts/setuptools_scm_1742403392659/work
shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1741166945909/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
snuggs @ file:///home/conda/feedstock_root/build_artifacts/snuggs_1733818638588/work
sortedcontainers @ file:///home/conda/feedstock_root/build_artifacts/sortedcontainers_1738440353519/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1721487534232/work
sphinx-autodoc-typehints==2.3.0
sphinx-click @ file:///home/conda/feedstock_root/build_artifacts/sphinx-click_1734814073887/work
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1733754101641/work
sphinxcontrib-devhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-devhelp_1733754113405/work
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1733754280555/work
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1733753744933/work
sphinxcontrib-qthelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-qthelp_1733753408017/work
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1733750479108/work
SQLAlchemy @ file:///home/conda/feedstock_root/build_artifacts/sqlalchemy_1743109707043/work
tblib @ file:///home/conda/feedstock_root/build_artifacts/tblib_1733842374544/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work
tomlkit==0.13.2
toolz @ file:///home/conda/feedstock_root/build_artifacts/toolz_1733736030883/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1732615921868/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1742745135198/work
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1718728347128/work
Werkzeug==3.1.3
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1722348170975/work
xmltodict==0.14.2
xyzservices @ file:///home/conda/feedstock_root/build_artifacts/xyzservices_1737234886776/work
zict @ file:///home/conda/feedstock_root/build_artifacts/zict_1733261551178/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work
| name: datacube-core
channels:
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_gnu
- affine=2.4.0=pyhd8ed1ab_1
- alabaster=0.7.16=pyhd8ed1ab_0
- aom=3.9.1=hac33072_0
- attrs=25.3.0=pyh71513ae_0
- aws-c-auth=0.8.6=hd08a7f5_4
- aws-c-cal=0.8.7=h043a21b_0
- aws-c-common=0.12.0=hb9d3cd8_0
- aws-c-compression=0.3.1=h3870646_2
- aws-c-event-stream=0.5.4=h04a3f94_2
- aws-c-http=0.9.4=hb9b18c6_4
- aws-c-io=0.17.0=h3dad3f2_6
- aws-c-mqtt=0.12.2=h108da3e_2
- aws-c-s3=0.7.13=h822ba82_2
- aws-c-sdkutils=0.2.3=h3870646_2
- aws-checksums=0.2.3=h3870646_2
- aws-crt-cpp=0.31.0=h55f77e1_4
- aws-sdk-cpp=1.11.510=h37a5c72_3
- azure-core-cpp=1.14.0=h5cfcd09_0
- azure-identity-cpp=1.10.0=h113e628_0
- azure-storage-blobs-cpp=12.13.0=h3cf044e_1
- azure-storage-common-cpp=12.8.0=h736e048_1
- azure-storage-files-datalake-cpp=12.12.0=ha633028_1
- babel=2.17.0=pyhd8ed1ab_0
- blosc=1.21.6=he440d0b_1
- bokeh=3.4.2=pyhd8ed1ab_0
- boto3=1.37.23=pyhd8ed1ab_0
- botocore=1.37.23=pyge38_1234567_0
- brotli-python=1.1.0=py39hf88036b_2
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.1.31=hbcca054_0
- cachetools=5.5.2=pyhd8ed1ab_0
- certifi=2025.1.31=pyhd8ed1ab_0
- cftime=1.6.4=py39hf3d9206_1
- charset-normalizer=3.4.1=pyhd8ed1ab_0
- click=8.1.8=pyh707e725_0
- click-plugins=1.1.1=pyhd8ed1ab_1
- cligj=0.7.2=pyhd8ed1ab_2
- cloudpickle=3.1.1=pyhd8ed1ab_0
- colorama=0.4.6=pyhd8ed1ab_1
- contourpy=1.3.0=py39h74842e3_2
- cyrus-sasl=2.1.27=h54b06d7_7
- cytoolz=1.0.1=py39h8cd3c5a_0
- dask=2024.8.0=pyhd8ed1ab_0
- dask-core=2024.8.0=pyhd8ed1ab_0
- dask-expr=1.1.10=pyhd8ed1ab_0
- dav1d=1.2.1=hd590300_0
- distributed=2024.8.0=pyhd8ed1ab_0
- docutils=0.21.2=pyhd8ed1ab_1
- exceptiongroup=1.2.2=pyhd8ed1ab_1
- freetype=2.13.3=h48d6fc4_0
- freexl=2.0.0=h9dce30a_2
- fsspec=2025.3.1=pyhd8ed1ab_0
- geos=3.13.1=h97f6797_0
- geotiff=1.7.4=h3551947_0
- gflags=2.2.2=h5888daf_1005
- giflib=5.2.2=hd590300_0
- glog=0.7.1=hbabe93e_0
- greenlet=3.1.1=py39hf88036b_1
- hdf4=4.2.15=h2a13503_7
- hdf5=1.14.4=nompi_h2d575fe_105
- icu=75.1=he02047a_0
- idna=3.10=pyhd8ed1ab_1
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=8.6.1=pyha770c72_0
- importlib_metadata=8.6.1=hd8ed1ab_0
- importlib_resources=6.5.2=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_1
- jinja2=3.1.6=pyhd8ed1ab_0
- jmespath=1.0.1=pyhd8ed1ab_1
- json-c=0.18=h6688a6e_0
- jsonschema=4.23.0=pyhd8ed1ab_1
- jsonschema-specifications=2024.10.1=pyhd8ed1ab_1
- keyutils=1.6.1=h166bdaf_0
- krb5=1.21.3=h659f571_0
- lark-parser=0.12.0=pyhd8ed1ab_1
- lcms2=2.17=h717163a_0
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h27087fc_0
- libabseil=20250127.1=cxx17_hbbce691_0
- libaec=1.1.3=h59595ed_0
- libarchive=3.7.7=h4585015_3
- libarrow=19.0.1=h120c447_5_cpu
- libarrow-acero=19.0.1=hcb10f89_5_cpu
- libarrow-dataset=19.0.1=hcb10f89_5_cpu
- libarrow-substrait=19.0.1=h1bed206_5_cpu
- libavif16=1.2.1=hbb36593_2
- libblas=3.9.0=31_h59b9bed_openblas
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcblas=3.9.0=31_he106b2a_openblas
- libcrc32c=1.1.2=h9c3ff4c_0
- libcurl=8.12.1=h332b0f4_0
- libde265=1.0.15=h00ab1b0_0
- libdeflate=1.23=h4ddbbb0_0
- libedit=3.1.20250104=pl5321h7949ede_0
- libev=4.33=hd590300_2
- libevent=2.1.12=hf998b51_1
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.6=h2dba641_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgdal-core=3.10.2=h05269f4_1
- libgfortran=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libgomp=14.2.0=h767d61c_2
- libgoogle-cloud=2.36.0=hc4361e1_1
- libgoogle-cloud-storage=2.36.0=h0121fbd_1
- libgrpc=1.71.0=he753a82_0
- libheif=1.19.7=gpl_hc18d805_100
- libiconv=1.18=h4ce23a2_1
- libjpeg-turbo=3.0.0=hd590300_1
- libkml=1.3.0=hf539b9f_1021
- liblapack=3.9.0=31_h7ac8fdf_openblas
- liblzma=5.6.4=hb9d3cd8_0
- libnetcdf=4.9.2=nompi_h5ddbaa4_116
- libnghttp2=1.64.0=h161d5f1_0
- libnsl=2.0.1=hd590300_0
- libntlm=1.8=hb9d3cd8_0
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopentelemetry-cpp=1.19.0=hd1b1c89_0
- libopentelemetry-cpp-headers=1.19.0=ha770c72_0
- libparquet=19.0.1=h081d1f1_5_cpu
- libpng=1.6.47=h943b412_0
- libpq=17.4=h27ae623_0
- libprotobuf=5.29.3=h501fc15_0
- libre2-11=2024.07.02=hba17884_3
- librttopo=1.1.0=hd718a1a_18
- libspatialite=5.1.0=h366e088_13
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.1=hf672d98_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libthrift=0.21.0=h0e7cc3e_0
- libtiff=4.7.0=hd9ff511_3
- libutf8proc=2.10.0=h4c51ac1_0
- libuuid=2.38.1=h0b41bf4_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.17.0=h8a09558_0
- libxcrypt=4.4.36=hd590300_1
- libxml2=2.13.7=h8d12d68_0
- libzip=1.11.2=h6991a6a_0
- libzlib=1.3.1=hb9d3cd8_2
- locket=1.0.0=pyhd8ed1ab_0
- lz4=4.3.3=py39h92207c2_2
- lz4-c=1.10.0=h5888daf_1
- lzo=2.10=hd590300_1001
- markupsafe=3.0.2=py39h9399b63_1
- minizip=4.0.7=h05a5f5f_3
- msgpack-python=1.1.0=py39h74842e3_0
- ncurses=6.5=h2d0b736_3
- netcdf4=1.7.2=nompi_py39h1defa26_101
- nlohmann_json=3.11.3=he02047a_1
- numpy=2.0.2=py39h9cb892a_1
- openjpeg=2.5.3=h5fbd93e_0
- openldap=2.6.9=he970967_0
- openssl=3.4.1=h7b32b05_0
- orc=2.1.1=h17f744e_1
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.2.3=py39h3b40f6f_2
- partd=1.4.2=pyhd8ed1ab_0
- pcre2=10.44=hba22ea6_2
- pillow=11.1.0=py39h15c0740_0
- pip=25.0.1=pyh8b19718_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_2
- pluggy=1.5.0=pyhd8ed1ab_1
- proj=9.5.1=h0054346_0
- prometheus-cpp=1.3.0=ha5d0236_0
- psutil=7.0.0=py39h8cd3c5a_0
- psycopg2=2.9.9=py39h2bc273e_2
- pthread-stubs=0.4=hb9d3cd8_1002
- pyarrow=19.0.1=py39hf3d152e_0
- pyarrow-core=19.0.1=py39h6117c73_0_cpu
- pyarrow-hotfix=0.6=pyhd8ed1ab_1
- pygments=2.19.1=pyhd8ed1ab_0
- pyparsing=3.2.3=pyhd8ed1ab_1
- pyproj=3.6.1=py39h306d449_10
- pysocks=1.7.1=pyha55dd90_7
- pytest=8.3.5=pyhd8ed1ab_0
- pytest-runner=6.0.0=pyhd8ed1ab_0
- python=3.9.21=h9c0c6dc_1_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.9=5_cp39
- pytz=2024.1=pyhd8ed1ab_0
- pyyaml=6.0.2=py39h9399b63_2
- rasterio=1.4.3=py39h13cc60e_0
- rav1e=0.6.6=he8a937b_2
- re2=2024.07.02=h9925aae_3
- readline=8.2=h8c095d6_2
- referencing=0.36.2=pyh29332c3_0
- requests=2.32.3=pyhd8ed1ab_1
- rpds-py=0.24.0=py39h3506688_0
- s2n=1.5.14=h6c98b2b_0
- s3transfer=0.11.4=pyhd8ed1ab_0
- setuptools=75.8.2=pyhff2d567_0
- setuptools-scm=8.2.1=pyhd8ed1ab_0
- setuptools_scm=8.2.1=hd8ed1ab_0
- shapely=2.0.7=py39h322cc2b_1
- six=1.17.0=pyhd8ed1ab_0
- snappy=1.2.1=h8bd8927_1
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- snuggs=1.4.7=pyhd8ed1ab_2
- sortedcontainers=2.4.0=pyhd8ed1ab_1
- sphinx=7.4.7=pyhd8ed1ab_0
- sphinx-click=6.0.0=pyhd8ed1ab_1
- sphinxcontrib-applehelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-devhelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-htmlhelp=2.1.0=pyhd8ed1ab_1
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_1
- sphinxcontrib-qthelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-serializinghtml=1.1.10=pyhd8ed1ab_1
- sqlalchemy=2.0.40=py39h8cd3c5a_0
- sqlite=3.49.1=h9eae976_2
- svt-av1=3.0.2=h5888daf_0
- tblib=3.0.0=pyhd8ed1ab_1
- tk=8.6.13=noxft_h4845f30_101
- toml=0.10.2=pyhd8ed1ab_1
- tomli=2.2.1=pyhd8ed1ab_1
- toolz=1.0.0=pyhd8ed1ab_1
- tornado=6.4.2=py39h8cd3c5a_0
- typing-extensions=4.13.0=h9fa5a19_1
- typing_extensions=4.13.0=pyh29332c3_1
- tzdata=2025b=h78e105d_0
- uriparser=0.9.8=hac33072_0
- urllib3=1.26.19=pyhd8ed1ab_0
- wheel=0.45.1=pyhd8ed1ab_1
- x265=3.5=h924138e_3
- xarray=2024.7.0=pyhd8ed1ab_0
- xerces-c=3.2.5=h988505b_2
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xyzservices=2025.1.0=pyhd8ed1ab_0
- yaml=0.2.5=h7f98852_2
- zict=3.0.0=pyhd8ed1ab_1
- zipp=3.21.0=pyhd8ed1ab_1
- zlib=1.3.1=hb9d3cd8_2
- zstd=1.5.7=hb8e6e7a_2
- pip:
- astroid==3.3.9
- bottleneck==1.4.2
- cffi==1.17.1
- ciso8601==2.3.2
- commonmark==0.9.1
- coverage==7.8.0
- cryptography==44.0.2
- datacube==1.8.7.dev84+g465c44ed
- dill==0.3.9
- hypothesis==6.130.5
- isort==6.0.1
- mccabe==0.7.0
- moto==5.1.2
- platformdirs==4.3.7
- pycodestyle==2.13.0
- pycparser==2.22
- pylint==3.3.6
- pytest-cov==6.0.0
- pytest-httpserver==1.1.2
- pytest-timeout==2.3.1
- recommonmark==0.7.1
- responses==0.25.7
- sphinx-autodoc-typehints==2.3.0
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-jquery==4.1
- tomlkit==0.13.2
- werkzeug==3.1.3
- xmltodict==0.14.2
prefix: /opt/conda/envs/datacube-core
| [
"tests/test_geometry.py::test_pickleable",
"tests/test_geometry.py::test_geobox_simple",
"tests/test_geometry.py::test_props",
"tests/test_geometry.py::test_tests",
"tests/test_geometry.py::test_common_crs",
"tests/test_geometry.py::test_to_crs",
"tests/test_geometry.py::test_boundingbox",
"tests/test_geometry.py::test_densify",
"tests/test_geometry.py::test_bbox_union",
"tests/test_geometry.py::test_unary_union",
"tests/test_geometry.py::test_unary_intersection",
"tests/test_geometry.py::TestCRSEqualityComparisons::test_comparison_edge_cases",
"tests/test_geometry.py::TestCRSEqualityComparisons::test_australian_albers_comparison",
"tests/test_geometry.py::test_no_epsg",
"tests/test_geometry.py::test_xy_from_geobox",
"tests/test_geometry.py::test_gen_test_image_xy",
"tests/test_geometry.py::test_fixed_point",
"tests/test_geometry.py::test_geobox",
"tests/test_geometry.py::test_geobox_xr_coords",
"tests/test_geometry.py::test_projected_lon",
"tests/test_geometry.py::test_3d_geometry_converted_to_2d_geometry",
"tests/test_geometry.py::test_3d_point_converted_to_2d_point",
"tests/test_geometry.py::test_crs",
"tests/test_geometry.py::test_polygon_path",
"tests/test_geometry.py::test_gbox_boundary",
"tests/test_geometry.py::test_geobox_scale_down",
"tests/test_geometry.py::test_roi_tools",
"tests/test_geometry.py::test_apply_affine",
"tests/test_geometry.py::test_point_transformer",
"tests/test_geometry.py::test_split_translation",
"tests/test_geometry.py::test_affine_checks",
"tests/test_geometry.py::test_affine_rsw",
"tests/test_geometry.py::test_fit",
"tests/test_geometry.py::test_scale_at_point",
"tests/test_geometry.py::test_pix_transform",
"tests/test_geometry.py::test_compute_reproject_roi",
"tests/test_geometry.py::test_compute_reproject_roi_issue647",
"tests/test_geometry.py::test_compute_reproject_roi_issue1047",
"tests/test_geometry.py::test_window_from_slice",
"tests/test_geometry.py::test_axis_overlap",
"tests/test_geometry.py::test_crs_compat",
"tests/test_geometry.py::test_crs_hash",
"tests/test_geometry.py::test_base_internals",
"tests/test_geometry.py::test_crs_units_per_degree",
"tests/test_geometry.py::test_align_pix[20-30-10-0-expect0]",
"tests/test_geometry.py::test_align_pix[20-30.5-10-0-expect1]",
"tests/test_geometry.py::test_align_pix[20-31.5-10-0-expect2]",
"tests/test_geometry.py::test_align_pix[20-30-10-3-expect3]",
"tests/test_geometry.py::test_align_pix[20-30-10--3-expect4]",
"tests/test_geometry.py::test_align_pix[20-30--10-0-expect5]",
"tests/test_geometry.py::test_align_pix[19.5-30--10-0-expect6]",
"tests/test_geometry.py::test_align_pix[18.5-30--10-0-expect7]",
"tests/test_geometry.py::test_align_pix[20-30--10-3-expect8]",
"tests/test_geometry.py::test_align_pix[20-30--10--3-expect9]",
"tests/test_geometry.py::test_lonlat_bounds"
] | [
"tests/test_geometry.py::test_ops",
"tests/test_geometry.py::test_geom_split",
"tests/test_geometry.py::test_multigeom",
"tests/test_geometry.py::test_shapely_wrappers",
"tests/test_geometry.py::test_chop",
"tests/test_geometry.py::test_clip_lon180",
"tests/test_geometry.py::test_wrap_dateline",
"tests/test_geometry.py::test_wrap_dateline_sinusoidal[pts0]",
"tests/test_geometry.py::test_wrap_dateline_sinusoidal[pts1]",
"tests/test_geometry.py::test_wrap_dateline_sinusoidal[pts2]",
"tests/test_geometry.py::test_wrap_dateline_utm",
"tests/test_geometry.py::test_geom_clone"
] | [] | [] | Apache License 2.0 | 12,365 | 1,439 | [
"datacube/utils/geometry/_base.py"
] |
VirtusLab__git-machete-463 | d5093f1a6f16a6f834fe71ddae9e4e63a2b18a03 | 2022-03-08 10:15:24 | a9d5a9fe723d808299a0f462201b6690d89028a4 | diff --git a/git_machete/client.py b/git_machete/client.py
index b8a55b1..0e44ae6 100644
--- a/git_machete/client.py
+++ b/git_machete/client.py
@@ -1947,28 +1947,27 @@ class MacheteClient:
if pr_from_github:
result.append(pr_from_github)
else:
- if len(prs_list) > 1:
- warn(f"PR #{pr_no} is not found in repository `{org}/{repo}`, skipping.")
- else:
- raise MacheteException(f"PR #{pr_no} is not found in repository `{org}/{repo}`")
+ raise MacheteException(f"PR #{pr_no} is not found in repository `{org}/{repo}`")
if not result:
raise MacheteException(
f"Given PRs: {', '.join(map(str, prs_list))} are not found in repository `{org}/{repo}`")
return result
if all:
if not all_opened_prs_from_github:
- raise MacheteException(f"Currently there is not any pull request opened in repository `{org}/{repo}`")
+ warn(f"Currently there are no pull requests opened in repository `{org}/{repo}`")
+ return []
return all_opened_prs_from_github
elif my and user:
result = [pr for pr in all_opened_prs_from_github if pr.user == user]
if not result:
- raise MacheteException(
- f"Current user {user} has no open pull request in repository `{org}/{repo}`")
+ warn(f"Current user `{user}` has no open pull request in repository `{org}/{repo}`")
+ return []
return result
elif by:
result = [pr for pr in all_opened_prs_from_github if pr.user == by]
if not result:
- raise MacheteException(f"User {by} has no open pull request in repository `{org}/{repo}`")
+ warn(f"User `{by}` has no open pull request in repository `{org}/{repo}`")
+ return []
return result
return []
| `checkout_github_prs()` should NOT fail if no PRs for the given criteria are found | VirtusLab/git-machete | diff --git a/git_machete/tests/functional/test_machete.py b/git_machete/tests/functional/test_machete.py
index fc4d788..7718ce8 100644
--- a/git_machete/tests/functional/test_machete.py
+++ b/git_machete/tests/functional/test_machete.py
@@ -2220,6 +2220,7 @@ class MacheteTester(unittest.TestCase):
@mock.patch('git_machete.github.GITHUB_REMOTE_PATTERNS', FAKE_GITHUB_REMOTE_PATTERNS)
@mock.patch('git_machete.options.CommandLineOptions', FakeCommandLineOptions)
@mock.patch('git_machete.utils.run_cmd', mock_run_cmd) # to hide git outputs in tests
+ @mock.patch('git_machete.github.__get_github_token', mock__get_github_token)
@mock.patch('urllib.request.Request', git_api_state_for_test_checkout_prs.new_request())
@mock.patch('urllib.request.urlopen', MockContextManager)
def test_github_checkout_prs(self) -> None:
@@ -2416,6 +2417,16 @@ class MacheteTester(unittest.TestCase):
self.assertEqual(e.exception.parameter, expected_error_message,
'Verify that expected error message has appeared when given pull request to checkout does not exists.')
+ with self.assertRaises(MacheteException) as e:
+ self.launch_command('github', 'checkout-prs', '19', '100')
+ if e:
+ self.assertEqual(e.exception.parameter, expected_error_message,
+ 'Verify that expected error message has appeared when one of the given pull requests to checkout does not exists.')
+
+ # check against user with no open pull requests
+ expected_msg = f"Warn: User `tester` has no open pull request in repository `{org}/{repo}`\n"
+ self.assert_command(['github', 'checkout-prs', '--by', 'tester'], expected_msg, strip_indentation=False)
+
# Check against closed pull request with head branch deleted from remote
local_path = popen("mktemp -d")
self.repo_sandbox.new_repo(GitRepositorySandbox.second_remote_path)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/VirtusLab/git-machete.git@d5093f1a6f16a6f834fe71ddae9e4e63a2b18a03#egg=git_machete
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: git-machete
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/git-machete
| [
"git_machete/tests/functional/test_machete.py::MacheteTester::test_github_checkout_prs"
] | [] | [
"git_machete/tests/functional/test_machete.py::MacheteTester::test_add",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_advance_with_few_possible_downstream_branches_and_yes_option",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_advance_with_no_downstream_branches",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_advance_with_one_downstream_branch",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_anno_prs",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_branch_reappears_in_definition",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_discover_traverse_squash",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_github_checkout_prs_freshly_cloned",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_github_checkout_prs_from_fork_with_deleted_repo",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_github_create_pr",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_github_create_pr_missing_base_branch_on_remote",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_down",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_first_root_with_downstream",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_first_root_without_downstream",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_last",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_next_successor_exists",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_next_successor_on_another_root_tree",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_prev_successor_exists",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_prev_successor_on_another_root_tree",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_root",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_go_up",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_help",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_log",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_retarget_pr",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_down",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_first",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_last",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_next",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_prev",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_root",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_show_up",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_slide_out",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_slide_out_with_down_fork_point_and_multiple_children_of_last_branch",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_slide_out_with_invalid_down_fork_point",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_slide_out_with_valid_down_fork_point",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_squash_merge",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_squash_with_invalid_fork_point",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_squash_with_valid_fork_point",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_traverse_no_push",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_traverse_no_push_override",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_traverse_no_push_untracked",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_update_with_fork_point_not_specified",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_update_with_fork_point_specified",
"git_machete/tests/functional/test_machete.py::MacheteTester::test_update_with_invalid_fork_point"
] | [] | MIT License | 12,366 | 489 | [
"git_machete/client.py"
] |
|
Dorthu__openapi3-77 | 9b4dc86bc736eddb8b7f1e6c88392fc35dc65b1a | 2022-03-08 13:57:28 | 7039a15e65477b505467b85bcd09215645f3ec9a | diff --git a/openapi3/example.py b/openapi3/example.py
index ea9a959..b2185d0 100644
--- a/openapi3/example.py
+++ b/openapi3/example.py
@@ -17,5 +17,5 @@ class Example(ObjectBase):
"""
self.summary = self._get("summary", str)
self.description = self._get("description", str)
- self.value = self._get("value", ["Reference", dict, str]) # 'any' type
+ self.value = self._get("value", "*")
self.externalValue = self._get("externalValue", str)
diff --git a/openapi3/paths.py b/openapi3/paths.py
index 0b31cc1..6891ad5 100644
--- a/openapi3/paths.py
+++ b/openapi3/paths.py
@@ -467,7 +467,7 @@ class MediaType(ObjectBase):
Implementation of :any:`ObjectBase._parse_data`
"""
self.schema = self._get("schema", ["Schema", "Reference"])
- self.example = self._get("example", str) # 'any' type
+ self.example = self._get("example", "*")
self.examples = self._get("examples", ["Example", "Reference"], is_map=True)
self.encoding = self._get("encoding", dict) # Map['Encoding']
| SpecError when the result is an array
```
openapi3.errors.SpecError: Expected paths./phone/batch_locations.post.responses.201.content.application/json.examples.response.value to be one of [Reference,<class 'dict'>,<class 'str'>], got <class 'list'>
```
The attached file is a reduced version of the spec that exhibits the problem; the full spec, where I found the problem, is available from https://marketplace.zoom.us/docs/page-data/api-reference/phone/methods/page-data.json (you need to access `R.result.pageContext.OAS.spec`, then add an `email` field to the `contact` object, duh).
[phone.json.txt](https://github.com/Dorthu/openapi3/files/8187138/phone.json.txt)
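A stripped-down reproduction sketch (the spec dict below is a hypothetical minimal example mirroring the failing shape, not the Zoom spec itself; usage follows the library's `OpenAPI(raw_spec, validate=True)` entry point):
```python
from openapi3 import OpenAPI

spec = {
    "openapi": "3.1.0",
    "info": {"title": "repro", "version": "1.0.0"},
    "paths": {
        "/batch_locations": {
            "post": {
                "responses": {
                    "201": {
                        "description": "created",
                        "content": {
                            "application/json": {
                                "examples": {
                                    # list-valued example, the shape the error complains about
                                    "response": {"value": [{"name": "something"}]}
                                }
                            }
                        },
                    }
                }
            }
        }
    },
}

api = OpenAPI(spec, validate=True)
print(api.errors())  # non-empty before the fix, [] once 'any'-typed values accept lists
```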
| Dorthu/openapi3 | diff --git a/tests/conftest.py b/tests/conftest.py
index b4e83cf..0e92712 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -186,3 +186,11 @@ def rfc_6901():
Provides a spec that includes RFC 6901 escape codes in ref paths
"""
yield _get_parsed_yaml("rfc_6901.yaml")
+
+
[email protected]
+def with_array_example():
+ """
+ Provides a spec that includes arrays as the value of examples
+ """
+ yield _get_parsed_yaml("example_array.yaml")
diff --git a/tests/fixtures/example_array.yaml b/tests/fixtures/example_array.yaml
new file mode 100644
index 0000000..e47cf16
--- /dev/null
+++ b/tests/fixtures/example_array.yaml
@@ -0,0 +1,41 @@
+# this is a valid spec that contains a schema with an array as the value of an
+# exmaple object
+openapi: "3.1.0"
+info:
+ version: 1.0.0
+ title: Examples Array
+paths:
+ /example:
+ get:
+ operationId: exampleArrayGet
+ responses:
+ '200':
+ description: example
+ content:
+ application/json:
+ example:
+ - name: something
+ schema:
+ properties:
+ name:
+ type: string
+ description: example
+ example: something
+ /examples:
+ get:
+ operationId: examplesArrayGet
+ responses:
+ '200':
+ description: example
+ content:
+ application/json:
+ examples:
+ one:
+ value:
+ - name: something
+ schema:
+ properties:
+ name:
+ type: string
+ description: example
+ example: something
diff --git a/tests/parsing_test.py b/tests/parsing_test.py
index 74bcda3..b69036a 100644
--- a/tests/parsing_test.py
+++ b/tests/parsing_test.py
@@ -132,3 +132,11 @@ def test_securityparameters(with_securityparameters):
spec = OpenAPI(with_securityparameters, validate=True)
errors = spec.errors()
assert len(errors) == 0
+
+
+def test_example_type_array(with_array_example):
+ """
+    Tests that examples, defined as "any" type, accept arrays
+ """
+ spec = OpenAPI(with_array_example, validate=True)
+ assert len(spec.errors()) == 0, spec.errors()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 2
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/Dorthu/openapi3.git@9b4dc86bc736eddb8b7f1e6c88392fc35dc65b1a#egg=openapi3
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
requests==2.32.3
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
| name: openapi3
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- idna==3.10
- pyyaml==6.0.2
- requests==2.32.3
- urllib3==2.3.0
prefix: /opt/conda/envs/openapi3
| [
"tests/parsing_test.py::test_example_type_array"
] | [] | [
"tests/parsing_test.py::test_parse_from_yaml",
"tests/parsing_test.py::test_parsing_fails",
"tests/parsing_test.py::test_parsing_broken_refernece",
"tests/parsing_test.py::test_parsing_wrong_parameter_name",
"tests/parsing_test.py::test_parsing_dupe_operation_id",
"tests/parsing_test.py::test_parsing_parameter_name_with_underscores",
"tests/parsing_test.py::test_object_example",
"tests/parsing_test.py::test_parsing_float_validation",
"tests/parsing_test.py::test_parsing_with_links",
"tests/parsing_test.py::test_param_types",
"tests/parsing_test.py::test_parsing_broken_links",
"tests/parsing_test.py::test_securityparameters"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,368 | 330 | [
"openapi3/example.py",
"openapi3/paths.py"
] |
|
nilearn__nilearn-3174 | e1c2f8bd9cd22d9cce9028bb3a56ac7df73810b5 | 2022-03-09 09:35:56 | f40ecbae9e546987e517f8e27208cc3a9ea445d1 | github-actions[bot]: 👋 @NicolasGensollen Thanks for creating a PR!
Until this PR is ready for review, you can include the [WIP] tag in its title, or leave it as a github draft.
Please make sure it is compliant with our [contributing guidelines](https://nilearn.github.io/development.html#contribution-guidelines). In particular, be sure it checks the boxes listed below.
- [ ] PR has an interpretable title.
- [ ] PR links to Github issue with mention "Closes #XXXX"
- [ ] Code is PEP8-compliant.
- [ ] (Bug fixes) There is at least one test that would fail under the original bug conditions.
- [ ] (New features) There is at least one unit test per new function / class.
- [ ] (New features) The new feature is demoed in at least one relevant example.
We will review it as quick as possible, feel free to ping us with questions if needed.
codecov[bot]: # [Codecov](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) Report
> Merging [#3174](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) (bb71210) into [main](https://codecov.io/gh/nilearn/nilearn/commit/e1c2f8bd9cd22d9cce9028bb3a56ac7df73810b5?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) (e1c2f8b) will **increase** coverage by `0.03%`.
> The diff coverage is `100.00%`.
> :exclamation: Current head bb71210 differs from pull request most recent head 4c1060f. Consider uploading reports for the commit 4c1060f to get more accurate results
[](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn)
```diff
@@ Coverage Diff @@
## main #3174 +/- ##
==========================================
+ Coverage 90.42% 90.45% +0.03%
==========================================
Files 122 122
Lines 14593 14593
Branches 2982 2982
==========================================
+ Hits 13195 13200 +5
+ Misses 826 823 -3
+ Partials 572 570 -2
```
| [Impacted Files](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn) | Coverage Δ | |
|---|---|---|
| [nilearn/datasets/func.py](https://codecov.io/gh/nilearn/nilearn/pull/3174/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn#diff-bmlsZWFybi9kYXRhc2V0cy9mdW5jLnB5) | `77.79% <100.00%> (+0.79%)` | :arrow_up: |
| [nilearn/reporting/glm\_reporter.py](https://codecov.io/gh/nilearn/nilearn/pull/3174/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn#diff-bmlsZWFybi9yZXBvcnRpbmcvZ2xtX3JlcG9ydGVyLnB5) | `93.77% <0.00%> (-1.78%)` | :arrow_down: |
| [nilearn/reporting/\_get\_clusters\_table.py](https://codecov.io/gh/nilearn/nilearn/pull/3174/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn#diff-bmlsZWFybi9yZXBvcnRpbmcvX2dldF9jbHVzdGVyc190YWJsZS5weQ==) | `100.00% <0.00%> (+3.12%)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn). Last update [e1c2f8b...4c1060f](https://codecov.io/gh/nilearn/nilearn/pull/3174?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=nilearn).
| diff --git a/nilearn/datasets/func.py b/nilearn/datasets/func.py
index f67abcd70..4af82a3f0 100644
--- a/nilearn/datasets/func.py
+++ b/nilearn/datasets/func.py
@@ -971,11 +971,11 @@ def fetch_abide_pcp(data_dir=None, n_subjects=None, pipeline='cpac',
'ABIDE_Initiative')
if quality_checked:
- kwargs['qc_rater_1'] = b'OK'
- kwargs['qc_anat_rater_2'] = [b'OK', b'maybe']
- kwargs['qc_func_rater_2'] = [b'OK', b'maybe']
- kwargs['qc_anat_rater_3'] = b'OK'
- kwargs['qc_func_rater_3'] = b'OK'
+ kwargs['qc_rater_1'] = 'OK'
+ kwargs['qc_anat_rater_2'] = ['OK', 'maybe']
+ kwargs['qc_func_rater_2'] = ['OK', 'maybe']
+ kwargs['qc_anat_rater_3'] = 'OK'
+ kwargs['qc_func_rater_3'] = 'OK'
# Fetch the phenotypic file and load it
csv = 'Phenotypic_V1_0b_preprocessed1.csv'
| `fetch_abide_pcp` returns empty results
Since #2829, `fetch_abide_pcp` returns empty results. This is because the filters applied to the phenotypic data expect that array to contain `bytes` (https://github.com/nilearn/nilearn/blob/1ebf6b70d79d88e6c3de6d9799c79309c6c99277/nilearn/datasets/func.py#L974), which is what `np.recfromcsv` returned, but the values are now `str` since `pd.read_csv` is used instead (https://github.com/nilearn/nilearn/blob/1ebf6b70d79d88e6c3de6d9799c79309c6c99277/nilearn/datasets/func.py#L1000).
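A standalone sketch of the type mismatch is below. It is not the fetcher's actual filtering code, just an illustration of why comparing `pd.read_csv` output against `bytes` matches nothing; the inline CSV content is made up.

```python
# Illustrative only: pd.read_csv yields str values, so equality against bytes
# never matches, and every subject ends up filtered out.
import io
import pandas as pd

pheno = pd.read_csv(io.StringIO("FILE_ID,qc_rater_1\nfilename,OK\n"))

print((pheno["qc_rater_1"] == b"OK").sum())  # 0 -> the old bytes filter drops all rows
print((pheno["qc_rater_1"] == "OK").sum())   # 1 -> the str filter keeps the row
```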
| nilearn/nilearn | diff --git a/doc/changes/latest.rst b/doc/changes/latest.rst
index ec114d0d2..ebac63719 100644
--- a/doc/changes/latest.rst
+++ b/doc/changes/latest.rst
@@ -13,6 +13,7 @@ NEW
Fixes
-----
+- Fix function :func:`~datasets.fetch_abide_pcp` which was returning empty phenotypes and ``func_preproc`` after release ``0.9.0`` due to supporting pandas dataframes in fetchers (:gh:`3174` by `Nicolas Gensollen`_).
Enhancements
------------
diff --git a/nilearn/datasets/tests/test_func.py b/nilearn/datasets/tests/test_func.py
index c46b807d0..877bed4bc 100644
--- a/nilearn/datasets/tests/test_func.py
+++ b/nilearn/datasets/tests/test_func.py
@@ -282,23 +282,39 @@ def test_fetch_localizer_button_task(tmp_path, request_mocker,
assert dataset.description != ''
-def test_fetch_abide_pcp(tmp_path, request_mocker):
- ids = list(range(800))
- filenames = ['no_filename'] * 800
- filenames[::2] = ['filename'] * 400
- pheno = pd.DataFrame({"subject_id": ids, "FILE_ID": filenames},
- columns=["subject_id", "FILE_ID"])
[email protected]("quality_checked", [False, True])
+def test_fetch_abide_pcp(tmp_path, request_mocker, quality_checked):
+ n_subjects = 800
+ ids = list(range(n_subjects))
+ filenames = ['no_filename'] * n_subjects
+ filenames[::2] = ['filename'] * int(n_subjects / 2)
+ qc_rater_1 = ['OK'] * n_subjects
+ qc_rater_1[::4] = ['fail'] * int(n_subjects / 4)
+ pheno = pd.DataFrame(
+ {"subject_id": ids,
+ "FILE_ID": filenames,
+ "qc_rater_1": qc_rater_1,
+ "qc_anat_rater_2": qc_rater_1,
+ "qc_func_rater_2": qc_rater_1,
+ "qc_anat_rater_3": qc_rater_1,
+ "qc_func_rater_3": qc_rater_1},
+ columns=[
+ "subject_id", "FILE_ID", "qc_rater_1",
+ "qc_anat_rater_2", "qc_func_rater_2",
+ "qc_anat_rater_3", "qc_func_rater_3"]
+ )
request_mocker.url_mapping["*rocessed1.csv"] = pheno.to_csv(index=False)
# All subjects
dataset = func.fetch_abide_pcp(data_dir=tmp_path,
- quality_checked=False, verbose=0)
- assert len(dataset.func_preproc) == 400
+ quality_checked=quality_checked, verbose=0)
+ div = 4 if quality_checked else 2
+ assert len(dataset.func_preproc) == n_subjects / div
assert dataset.description != ''
# Smoke test using only a string, rather than a list of strings
dataset = func.fetch_abide_pcp(data_dir=tmp_path,
- quality_checked=False, verbose=0,
+ quality_checked=quality_checked, verbose=0,
derivatives='func_preproc')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
joblib==1.4.2
nibabel==5.3.2
-e git+https://github.com/nilearn/nilearn.git@e1c2f8bd9cd22d9cce9028bb3a56ac7df73810b5#egg=nilearn
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
threadpoolctl==3.6.0
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
zipp==3.21.0
| name: nilearn
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- joblib==1.4.2
- nibabel==5.3.2
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- threadpoolctl==3.6.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/nilearn
| [
"nilearn/datasets/tests/test_func.py::test_fetch_abide_pcp[True]"
] | [
"nilearn/datasets/tests/test_func.py::test__load_mixed_gambles",
"nilearn/datasets/tests/test_func.py::test_fetch_development_fmri"
] | [
"nilearn/datasets/tests/test_func.py::test_fetch_haxby",
"nilearn/datasets/tests/test_func.py::test_fetch_adhd",
"nilearn/datasets/tests/test_func.py::test_miyawaki2008",
"nilearn/datasets/tests/test_func.py::test_fetch_localizer_contrasts",
"nilearn/datasets/tests/test_func.py::test_fetch_localizer_calculation_task",
"nilearn/datasets/tests/test_func.py::test_fetch_localizer_button_task",
"nilearn/datasets/tests/test_func.py::test_fetch_abide_pcp[False]",
"nilearn/datasets/tests/test_func.py::test_fetch_mixed_gambles",
"nilearn/datasets/tests/test_func.py::test_check_parameters_megatrawls_datasets",
"nilearn/datasets/tests/test_func.py::test_fetch_megatrawls_netmats",
"nilearn/datasets/tests/test_func.py::test_fetch_surf_nki_enhanced",
"nilearn/datasets/tests/test_func.py::test_fetch_development_fmri_participants",
"nilearn/datasets/tests/test_func.py::test_fetch_development_fmri_functional",
"nilearn/datasets/tests/test_func.py::test_fetch_development_fmri_invalid_n_subjects",
"nilearn/datasets/tests/test_func.py::test_fetch_development_fmri_exception",
"nilearn/datasets/tests/test_func.py::test_fetch_bids_langloc_dataset",
"nilearn/datasets/tests/test_func.py::test_select_from_index",
"nilearn/datasets/tests/test_func.py::test_fetch_openneuro_dataset_index",
"nilearn/datasets/tests/test_func.py::test_fetch_openneuro_dataset",
"nilearn/datasets/tests/test_func.py::test_fetch_localizer",
"nilearn/datasets/tests/test_func.py::test_fetch_language_localizer_demo_dataset",
"nilearn/datasets/tests/test_func.py::test_make_spm_auditory_events_file",
"nilearn/datasets/tests/test_func.py::test_fetch_spm_auditory",
"nilearn/datasets/tests/test_func.py::test_fetch_spm_multimodal",
"nilearn/datasets/tests/test_func.py::test_fiac"
] | [] | New BSD License | 12,375 | 321 | [
"nilearn/datasets/func.py"
] |
ResearchObject__ro-crate-py-111 | 043f7054c28d96128898435144049d1240ea6ea4 | 2022-03-09 13:48:46 | 6fe2b480f2e31baec3f3958c95067bb8d7e1ad9b | diff --git a/rocrate/cli.py b/rocrate/cli.py
index 7d8dad7..a74ec90 100644
--- a/rocrate/cli.py
+++ b/rocrate/cli.py
@@ -23,6 +23,7 @@ from .rocrate import ROCrate
from .model.computerlanguage import LANG_MAP
from .model.testservice import SERVICE_MAP
from .model.softwareapplication import APP_MAP
+from .utils import is_url
LANG_CHOICES = list(LANG_MAP)
@@ -30,10 +31,31 @@ SERVICE_CHOICES = list(SERVICE_MAP)
ENGINE_CHOICES = list(APP_MAP)
+def add_hash(id_):
+ if id_ is None or id_.startswith("#") or is_url(id_):
+ return id_
+ return "#" + id_
+
+
class State:
pass
+class CSVParamType(click.ParamType):
+ name = "csv"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, (list, tuple, set, frozenset)):
+ return value
+ try:
+ return value.split(",") if value else []
+ except AttributeError:
+ self.fail(f"{value!r} is not splittable", param, ctx)
+
+
+CSV = CSVParamType()
+
+
@click.group()
@click.option('-c', '--crate-dir', type=click.Path())
@click.pass_context
@@ -44,9 +66,10 @@ def cli(ctx, crate_dir):
@cli.command()
@click.option('--gen-preview', is_flag=True)
[email protected]('-e', '--exclude', type=CSV)
@click.pass_obj
-def init(state, gen_preview):
- crate = ROCrate(state.crate_dir, init=True, gen_preview=gen_preview)
+def init(state, gen_preview, exclude):
+ crate = ROCrate(state.crate_dir, init=True, gen_preview=gen_preview, exclude=exclude)
crate.metadata.write(state.crate_dir)
if crate.preview:
crate.preview.write(state.crate_dir)
@@ -80,7 +103,7 @@ def workflow(state, path, language):
@click.option('-m', '--main-entity')
@click.pass_obj
def suite(state, identifier, name, main_entity):
- suite_ = state.crate.add_test_suite(identifier=identifier, name=name, main_entity=main_entity)
+ suite_ = state.crate.add_test_suite(identifier=add_hash(identifier), name=name, main_entity=main_entity)
state.crate.metadata.write(state.crate_dir)
print(suite_.id)
@@ -94,7 +117,10 @@ def suite(state, identifier, name, main_entity):
@click.option('-n', '--name')
@click.pass_obj
def instance(state, suite, url, resource, service, identifier, name):
- instance_ = state.crate.add_test_instance(suite, url, resource=resource, service=service, identifier=identifier, name=name)
+ instance_ = state.crate.add_test_instance(
+ add_hash(suite), url, resource=resource, service=service,
+ identifier=add_hash(identifier), name=name
+ )
state.crate.metadata.write(state.crate_dir)
print(instance_.id)
diff --git a/rocrate/rocrate.py b/rocrate/rocrate.py
index 8d5befc..95f55d4 100644
--- a/rocrate/rocrate.py
+++ b/rocrate/rocrate.py
@@ -19,7 +19,6 @@
import errno
import json
-import os
import uuid
import zipfile
import atexit
@@ -47,7 +46,7 @@ from .model.testservice import TestService, get_service
from .model.softwareapplication import SoftwareApplication, get_app, PLANEMO_DEFAULT_VERSION
from .model.testsuite import TestSuite
-from .utils import is_url, subclasses, get_norm_value
+from .utils import is_url, subclasses, get_norm_value, walk
def read_metadata(metadata_path):
@@ -81,7 +80,8 @@ def pick_type(json_entity, type_map, fallback=None):
class ROCrate():
- def __init__(self, source=None, gen_preview=False, init=False):
+ def __init__(self, source=None, gen_preview=False, init=False, exclude=None):
+ self.exclude = exclude
self.__entity_map = {}
self.default_entities = []
self.data_entities = []
@@ -108,7 +108,7 @@ class ROCrate():
if not top_dir.is_dir():
raise NotADirectoryError(errno.ENOTDIR, f"'{top_dir}': not a directory")
self.add(RootDataset(self), Metadata(self))
- for root, dirs, files in os.walk(top_dir):
+ for root, dirs, files in walk(top_dir, exclude=self.exclude):
root = Path(root)
for name in dirs:
source = root / name
@@ -453,7 +453,7 @@ class ROCrate():
# fetch all files defined in the crate
def _copy_unlisted(self, top, base_path):
- for root, dirs, files in os.walk(top):
+ for root, dirs, files in walk(top, exclude=self.exclude):
root = Path(root)
for name in dirs:
source = root / name
diff --git a/rocrate/utils.py b/rocrate/utils.py
index 2843f59..76507df 100644
--- a/rocrate/utils.py
+++ b/rocrate/utils.py
@@ -18,6 +18,7 @@
# limitations under the License.
import collections
+import os
from datetime import datetime, timezone
from urllib.parse import urlsplit
@@ -78,3 +79,12 @@ def get_norm_value(json_entity, prop):
return [_ if isinstance(_, str) else _["@id"] for _ in value]
except (TypeError, KeyError):
raise ValueError(f"Malformed value for {prop!r}: {json_entity.get(prop)!r}")
+
+
+def walk(top, topdown=True, onerror=None, followlinks=False, exclude=None):
+ exclude = frozenset(exclude or [])
+ for root, dirs, files in os.walk(top):
+ if exclude:
+ dirs[:] = [_ for _ in dirs if _ not in exclude]
+ files[:] = [_ for _ in files if _ not in exclude]
+ yield root, dirs, files
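A quick sketch of how the two new helpers behave, inferred from the diff above; the directory name is a placeholder.

```python
# Behaviour implied by the diff: add_hash normalises CLI identifiers, and walk
# drops excluded names at every level of the tree.
from rocrate.cli import add_hash
from rocrate.utils import walk

assert add_hash("foo") == "#foo"                 # bare ids get a leading '#'
assert add_hash("#foo") == "#foo"                # already-hashed ids are untouched
assert add_hash("http://example.com") == "http://example.com"  # URLs are untouched

for root, dirs, files in walk("some_crate_dir", exclude=[".git", "test"]):
    assert ".git" not in dirs and "test" not in dirs  # pruned in place via dirs[:]
```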
| Exclude option for crate init
It would be nice to support an `exclude` option to avoid considering certain sub-paths when initializing an RO-Crate from a directory tree, i.e., `ROCrate(source, init=True)`. For instance, from the command line, you might want to run:
```
rocrate init --exclude .git
```
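A hedged usage sketch of the option as implemented; the paths are placeholders, and the Python call and the `-e` flag mirror `test_exclude` and `test_cli_init_exclude` in the test patch.

```python
# Python API: skip .git, test/ and README.md when building the crate from a tree.
from rocrate.rocrate import ROCrate

crate = ROCrate("my_project_dir", init=True, exclude=[".git", "test", "README.md"])
crate.write("ro_crate_out")  # excluded entries are neither listed nor copied

# CLI equivalent (comma-separated list):
#   rocrate -c my_project_dir init -e .git,test,README.md
```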
| ResearchObject/ro-crate-py | diff --git a/test/test_cli.py b/test/test_cli.py
index bcfe14f..adcda3b 100644
--- a/test/test_cli.py
+++ b/test/test_cli.py
@@ -20,6 +20,7 @@ import pytest
from click.testing import CliRunner
from rocrate.cli import cli
+from rocrate.model.file import File
from rocrate.model.metadata import TESTING_EXTRA_TERMS
from rocrate.rocrate import ROCrate
@@ -53,6 +54,22 @@ def test_cli_init(test_data_dir, helpers, monkeypatch, cwd, gen_preview):
assert json_entities["sort-and-change-case.ga"]["@type"] == "File"
+def test_cli_init_exclude(test_data_dir, helpers):
+ crate_dir = test_data_dir / "ro-crate-galaxy-sortchangecase"
+ (crate_dir / helpers.METADATA_FILE_NAME).unlink()
+ exclude = "test,README.md"
+ runner = CliRunner()
+ args = ["-c", str(crate_dir), "init", "-e", exclude]
+ assert runner.invoke(cli, args).exit_code == 0
+ crate = ROCrate(crate_dir)
+ for p in "LICENSE", "sort-and-change-case.ga":
+ assert isinstance(crate.dereference(p), File)
+ for p in exclude.split(",") + ["test/"]:
+ assert not crate.dereference(p)
+ for e in crate.data_entities:
+ assert not(e.id.startswith("test"))
+
+
@pytest.mark.parametrize("cwd", [False, True])
def test_cli_add_workflow(test_data_dir, helpers, monkeypatch, cwd):
# init
@@ -94,7 +111,7 @@ def test_cli_add_test_metadata(test_data_dir, helpers, monkeypatch, cwd):
assert json_entities[def_id]["@type"] == "File"
# add workflow
wf_path = crate_dir / "sort-and-change-case.ga"
- runner.invoke(cli, ["-c", str(crate_dir), "add", "workflow", "-l", "galaxy", str(wf_path)]).exit_code == 0
+ assert runner.invoke(cli, ["-c", str(crate_dir), "add", "workflow", "-l", "galaxy", str(wf_path)]).exit_code == 0
# add test suite
result = runner.invoke(cli, ["-c", str(crate_dir), "add", "test-suite"])
assert result.exit_code == 0
@@ -133,6 +150,32 @@ def test_cli_add_test_metadata(test_data_dir, helpers, monkeypatch, cwd):
assert set(TESTING_EXTRA_TERMS.items()).issubset(extra_terms.items())
[email protected]("hash_", [False, True])
+def test_cli_add_test_metadata_explicit_ids(test_data_dir, helpers, monkeypatch, hash_):
+ crate_dir = test_data_dir / "ro-crate-galaxy-sortchangecase"
+ runner = CliRunner()
+ assert runner.invoke(cli, ["-c", str(crate_dir), "init"]).exit_code == 0
+ wf_path = crate_dir / "sort-and-change-case.ga"
+ assert runner.invoke(cli, ["-c", str(crate_dir), "add", "workflow", "-l", "galaxy", str(wf_path)]).exit_code == 0
+ suite_id = "#foo"
+ cli_suite_id = suite_id if hash_ else suite_id[1:]
+ result = runner.invoke(cli, ["-c", str(crate_dir), "add", "test-suite", "-i", cli_suite_id])
+ assert result.exit_code == 0
+ assert result.output.strip() == suite_id
+ json_entities = helpers.read_json_entities(crate_dir)
+ assert suite_id in json_entities
+ instance_id = "#bar"
+ cli_instance_id = instance_id if hash_ else instance_id[1:]
+ result = runner.invoke(
+ cli, ["-c", str(crate_dir), "add", "test-instance", cli_suite_id,
+ "http://example.com", "-r", "jobs", "-i", cli_instance_id]
+ )
+ assert result.exit_code == 0
+ assert result.output.strip() == instance_id
+ json_entities = helpers.read_json_entities(crate_dir)
+ assert instance_id in json_entities
+
+
@pytest.mark.parametrize("cwd", [False, True])
def test_cli_write_zip(test_data_dir, monkeypatch, cwd):
crate_dir = test_data_dir / "ro-crate-galaxy-sortchangecase"
diff --git a/test/test_read.py b/test/test_read.py
index ba948c8..323fb92 100644
--- a/test/test_read.py
+++ b/test/test_read.py
@@ -224,6 +224,29 @@ def test_init(test_data_dir, tmpdir, helpers, override):
assert f1.read() == f2.read()
+def test_exclude(test_data_dir, tmpdir, helpers):
+ def check(out=False):
+ for p in "LICENSE", "sort-and-change-case.ga":
+ assert isinstance(crate.dereference(p), File)
+ for p in exclude + ["test/"]:
+ assert not crate.dereference(p)
+ if out:
+ assert not(crate.source / p).exists()
+ for e in crate.data_entities:
+ assert not(e.id.startswith("test"))
+ if out:
+ assert not(crate.source / "test").exists()
+ crate_dir = test_data_dir / "ro-crate-galaxy-sortchangecase"
+ (crate_dir / helpers.METADATA_FILE_NAME).unlink()
+ exclude = ["test", "README.md"]
+ crate = ROCrate(crate_dir, init=True, exclude=exclude)
+ check()
+ out_path = tmpdir / 'ro_crate_out'
+ crate.write(out_path)
+ crate = ROCrate(out_path)
+ check(out=True)
+
+
@pytest.mark.parametrize("gen_preview,preview_exists", [(False, False), (False, True), (True, False), (True, True)])
def test_init_preview(test_data_dir, tmpdir, helpers, gen_preview, preview_exists):
crate_dir = test_data_dir / "ro-crate-galaxy-sortchangecase"
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"flake8",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiosignal==1.3.2
arcp==0.2.1
async-timeout==5.0.1
attrs==25.3.0
bioblend==1.5.0
CacheControl==0.14.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
frozenlist==1.5.0
galaxy2cwl==0.1.4
gxformat2==0.20.0
idna==3.10
iniconfig==2.1.0
isodate==0.7.2
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
mistune==3.0.2
msgpack==1.1.0
multidict==6.2.0
mypy-extensions==1.0.0
packaging==24.2
pluggy==1.5.0
propcache==0.3.1
pycodestyle==2.13.0
pyflakes==3.3.2
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
rdflib==7.1.4
requests==2.32.3
requests-toolbelt==1.0.0
-e git+https://github.com/ResearchObject/ro-crate-py.git@043f7054c28d96128898435144049d1240ea6ea4#egg=rocrate
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.12
schema-salad==8.8.20250205075315
six==1.17.0
tinydb==4.8.2
tomli==2.2.1
tuspy==1.1.0
typing_extensions==4.13.0
urllib3==2.3.0
yarl==1.18.3
| name: ro-crate-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiosignal==1.3.2
- arcp==0.2.1
- async-timeout==5.0.1
- attrs==25.3.0
- bioblend==1.5.0
- cachecontrol==0.14.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- frozenlist==1.5.0
- galaxy2cwl==0.1.4
- gxformat2==0.20.0
- idna==3.10
- iniconfig==2.1.0
- isodate==0.7.2
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- mistune==3.0.2
- msgpack==1.1.0
- multidict==6.2.0
- mypy-extensions==1.0.0
- packaging==24.2
- pluggy==1.5.0
- propcache==0.3.1
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- rdflib==7.1.4
- requests==2.32.3
- requests-toolbelt==1.0.0
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.12
- schema-salad==8.8.20250205075315
- six==1.17.0
- tinydb==4.8.2
- tomli==2.2.1
- tuspy==1.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- yarl==1.18.3
prefix: /opt/conda/envs/ro-crate-py
| [
"test/test_cli.py::test_cli_init_exclude",
"test/test_cli.py::test_cli_add_test_metadata_explicit_ids[False]",
"test/test_read.py::test_exclude"
] | [] | [
"test/test_cli.py::test_cli_init[False-False]",
"test/test_cli.py::test_cli_init[False-True]",
"test/test_cli.py::test_cli_init[True-False]",
"test/test_cli.py::test_cli_init[True-True]",
"test/test_cli.py::test_cli_add_workflow[False]",
"test/test_cli.py::test_cli_add_workflow[True]",
"test/test_cli.py::test_cli_add_test_metadata[False]",
"test/test_cli.py::test_cli_add_test_metadata[True]",
"test/test_cli.py::test_cli_add_test_metadata_explicit_ids[True]",
"test/test_cli.py::test_cli_write_zip[False]",
"test/test_cli.py::test_cli_write_zip[True]",
"test/test_read.py::test_crate_dir_loading[False-False]",
"test/test_read.py::test_crate_dir_loading[True-False]",
"test/test_read.py::test_crate_dir_loading[True-True]",
"test/test_read.py::test_legacy_crate",
"test/test_read.py::test_bad_crate",
"test/test_read.py::test_init[False]",
"test/test_read.py::test_init[True]",
"test/test_read.py::test_init_preview[False-False]",
"test/test_read.py::test_init_preview[False-True]",
"test/test_read.py::test_init_preview[True-False]",
"test/test_read.py::test_init_preview[True-True]",
"test/test_read.py::test_no_parts",
"test/test_read.py::test_extra_data[False]",
"test/test_read.py::test_extra_data[True]",
"test/test_read.py::test_missing_dir",
"test/test_read.py::test_missing_file",
"test/test_read.py::test_generic_data_entity",
"test/test_read.py::test_root_conformsto",
"test/test_read.py::test_multi_type_context_entity"
] | [] | Apache License 2.0 | 12,376 | 1,453 | [
"rocrate/cli.py",
"rocrate/rocrate.py",
"rocrate/utils.py"
] |
|
zulip__zulip-terminal-1162 | 4318eb3c97c4faa8baac340faae9ff17256f9627 | 2022-03-11 06:12:06 | 3dcc08e0d4d3c52519594ea7d52c37d21bd54f7c | diff --git a/zulipterminal/model.py b/zulipterminal/model.py
index a53d2a6..f098e66 100644
--- a/zulipterminal/model.py
+++ b/zulipterminal/model.py
@@ -1427,7 +1427,7 @@ class Model:
assert event["type"] == "reaction"
message_id = event["message_id"]
# If the message is indexed
- if self.index["messages"][message_id] != {}:
+ if message_id in self.index["messages"]:
message = self.index["messages"][message_id]
if event["op"] == "add":
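A sketch of why the membership test matters here, assuming `index["messages"]` behaves like a `defaultdict(dict)`; that assumption is what makes the old check harmful.

```python
from collections import defaultdict

messages = defaultdict(dict)   # stand-in for self.index["messages"]
unindexed_id = 537             # a reaction event for a message that was never fetched

# Old check: the lookup itself inserts an empty dict for the unknown id.
if messages[unindexed_id] != {}:
    pass
print(unindexed_id in messages)  # True -- an empty {} stub now sits in the index,
                                 # so a later flags-update event hits the stub and
                                 # fails with KeyError: 'flags'

# New check: a plain membership test has no side effect on the index.
messages = defaultdict(dict)
if unindexed_id in messages:
    pass
print(unindexed_id in messages)  # False
```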
| KeyError: 'flags'

The following error was in my zulip-terminal-thread-exceptions.log:
Traceback (most recent call last):
File "/Users/kellyfoulk/Documents/code/zulip-terminal/zulipterminal/model.py", line 1265, in poll_for_events
self.event_actions[event['type']](event)
File "/Users/kellyfoulk/Documents/code/zulip-terminal/zulipterminal/model.py", line 1124, in _handle_update_message_flags_event
if flag_to_change not in msg['flags']:
KeyError: 'flags'
I was not doing anything in the terminal when this appeared. I had been on my browser and when I moved back to the terminal this was showing on the screen. This was about 1.5 hours after I had issue #919 occur and it was within the same zulip-terminal instance.
Running Zulip Terminal 0.6.0+git on mac in a Z shell | zulip/zulip-terminal | diff --git a/tests/model/test_model.py b/tests/model/test_model.py
index 7aab54f..d82ef87 100644
--- a/tests/model/test_model.py
+++ b/tests/model/test_model.py
@@ -2041,9 +2041,7 @@ class TestModel:
def _factory(msgs: MsgsType):
return {
"messages": {
- message_id: {}
- if reactions is None
- else {
+ message_id: {
"id": message_id,
"content": f"message content {message_id}",
"reactions": [
@@ -2061,6 +2059,7 @@ class TestModel:
],
}
for message_id, reactions in msgs
+ if reactions is not None
}
}
@@ -2082,7 +2081,7 @@ class TestModel:
)
model.index = reaction_event_index_factory(
[
- (unindexed_message_id, None),
+ (unindexed_message_id, None), # explicitly exclude
(2, [(1, "unicode_emoji", "1232", "thumbs_up")]),
(3, []),
]
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==1.2.3
attrs==25.3.0
autoflake==1.3.1
autopep8==1.5.7
beautifulsoup4==4.13.3
black==25.1.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.3
coverage==7.8.0
distro==1.9.0
exceptiongroup==1.2.2
flake8==3.9.2
flake8-continuation==1.0.5
flake8-quotes==3.2.0
gitlint==0.19.1
gitlint-core==0.19.1
idna==3.10
iniconfig==2.1.0
isort==5.7.0
lxml==5.3.1
mccabe==0.6.1
mypy==0.910
mypy_extensions==0.4.4
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
pudb==2017.1.4
py==1.11.0
pycodestyle==2.7.0
pyflakes==2.3.1
Pygments==2.19.1
pyperclip==1.9.0
pytest==6.2.5
pytest-cov==2.11.1
pytest-mock==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
sh==1.14.3
six==1.17.0
snakeviz==0.4.2
soupsieve==2.6
toml==0.10.2
tomli==2.2.1
tornado==6.4.2
types-python-dateutil==2.9.0.20241206
types-pytz==2025.2.0.20250326
types-requests==2.32.0.20250328
types-tzlocal==5.1.0.1
typing_extensions==4.13.0
tzlocal==5.3.1
urllib3==2.3.0
urwid==2.1.2
urwid_readline==0.15.1
zulip==0.9.0
-e git+https://github.com/zulip/zulip-terminal.git@4318eb3c97c4faa8baac340faae9ff17256f9627#egg=zulip_term
| name: zulip-terminal
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- arrow==1.2.3
- attrs==25.3.0
- autoflake==1.3.1
- autopep8==1.5.7
- beautifulsoup4==4.13.3
- black==25.1.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.3
- coverage==7.8.0
- distro==1.9.0
- exceptiongroup==1.2.2
- flake8==3.9.2
- flake8-continuation==1.0.5
- flake8-quotes==3.2.0
- gitlint==0.19.1
- gitlint-core==0.19.1
- idna==3.10
- iniconfig==2.1.0
- isort==5.7.0
- lxml==5.3.1
- mccabe==0.6.1
- mypy==0.910
- mypy-extensions==0.4.4
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pudb==2017.1.4
- py==1.11.0
- pycodestyle==2.7.0
- pyflakes==2.3.1
- pygments==2.19.1
- pyperclip==1.9.0
- pytest==6.2.5
- pytest-cov==2.11.1
- pytest-mock==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- sh==1.14.3
- six==1.17.0
- snakeviz==0.4.2
- soupsieve==2.6
- toml==0.10.2
- tomli==2.2.1
- tornado==6.4.2
- types-python-dateutil==2.9.0.20241206
- types-pytz==2025.2.0.20250326
- types-requests==2.32.0.20250328
- types-tzlocal==5.1.0.1
- typing-extensions==4.13.0
- tzlocal==5.3.1
- urllib3==2.3.0
- urwid==2.1.2
- urwid-readline==0.15.1
- zulip==0.9.0
- zulip-term==0.6.0+git
prefix: /opt/conda/envs/zulip-terminal
| [
"tests/model/test_model.py::TestModel::test__handle_reaction_event_not_in_index[add]",
"tests/model/test_model.py::TestModel::test__handle_reaction_event_not_in_index[remove]"
] | [] | [
"tests/model/test_model.py::TestModel::test_init",
"tests/model/test_model.py::TestModel::test_init_muted_topics[zulip_feature_level:None]",
"tests/model/test_model.py::TestModel::test_init_muted_topics[zulip_feature_level:1]",
"tests/model/test_model.py::TestModel::test_init_InvalidAPIKey_response",
"tests/model/test_model.py::TestModel::test_init_ZulipError_exception",
"tests/model/test_model.py::TestModel::test_register_initial_desired_events",
"tests/model/test_model.py::TestModel::test_normalize_and_cache_message_retention_text[ZFL=None_no_stream_retention_realm_retention=None]",
"tests/model/test_model.py::TestModel::test_normalize_and_cache_message_retention_text[ZFL=16_no_stream_retention_realm_retention=-1]",
"tests/model/test_model.py::TestModel::test_normalize_and_cache_message_retention_text[ZFL=17_stream_retention_days=30]",
"tests/model/test_model.py::TestModel::test_normalize_and_cache_message_retention_text[ZFL=18_stream_retention_days=[None,",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow0-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow0-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow0-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow1-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow1-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow1-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow2-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow2-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow2-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow3-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow3-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow3-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow4-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow4-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow4-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow5-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow5-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow5-msg_id2]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow6-1]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow6-5]",
"tests/model/test_model.py::TestModel::test_get_focus_in_current_narrow_individually[narrow6-msg_id2]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow0-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow0-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow1-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow1-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow2-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow2-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow3-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow3-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow4-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow4-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow5-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow5-5]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow6-1]",
"tests/model/test_model.py::TestModel::test_set_focus_in_current_narrow[narrow6-5]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow0-False]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow1-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow2-False]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow3-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow4-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow5-False]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow6-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow7-False]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow8-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow9-True]",
"tests/model/test_model.py::TestModel::test_is_search_narrow[narrow10-True]",
"tests/model/test_model.py::TestModel::test_set_narrow_bad_input[bad_args0]",
"tests/model/test_model.py::TestModel::test_set_narrow_bad_input[bad_args1]",
"tests/model/test_model.py::TestModel::test_set_narrow_bad_input[bad_args2]",
"tests/model/test_model.py::TestModel::test_set_narrow_bad_input[bad_args3]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow0-good_args0]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow1-good_args1]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow2-good_args2]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow3-good_args3]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow4-good_args4]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow5-good_args5]",
"tests/model/test_model.py::TestModel::test_set_narrow_already_set[narrow6-good_args6]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow0-narrow0-good_args0]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow1-narrow1-good_args1]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow2-narrow2-good_args2]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow3-narrow3-good_args3]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow4-narrow4-good_args4]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow5-narrow5-good_args5]",
"tests/model/test_model.py::TestModel::test_set_narrow_not_already_set[initial_narrow6-narrow6-good_args6]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow0-index0-current_ids0]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow1-index1-current_ids1]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow2-index2-current_ids2]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow3-index3-current_ids3]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow4-index4-current_ids4]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow5-index5-current_ids5]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow6-index6-current_ids6]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow7-index7-current_ids7]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow8-index8-current_ids8]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow9-index9-current_ids9]",
"tests/model/test_model.py::TestModel::test_get_message_ids_in_current_narrow[narrow10-index10-current_ids10]",
"tests/model/test_model.py::TestModel::test__fetch_topics_in_streams[response0-expected_index0-]",
"tests/model/test_model.py::TestModel::test__fetch_topics_in_streams[response1-expected_index1-]",
"tests/model/test_model.py::TestModel::test__fetch_topics_in_streams[response2-expected_index2-Some",
"tests/model/test_model.py::TestModel::test_topics_in_stream[topics_index0-False]",
"tests/model/test_model.py::TestModel::test_topics_in_stream[topics_index1-True]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_no_existing_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_no_existing_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_no_existing_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_realm_original_no_existing_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_realm_original_no_existing_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_realm_original_no_existing_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_mine_existing_different_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_mine_existing_different_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_mine_existing_different_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_zulip_original_mine_existing_different_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_zulip_original_mine_existing_different_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_zulip_original_mine_existing_different_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_others_existing_same_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_others_existing_same_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_original_others_existing_same_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_alias_others_existing_same_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_alias_others_existing_same_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[add_unicode_alias_others_existing_same_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_original_mine_existing_same_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_original_mine_existing_same_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_original_mine_existing_same_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_alias_mine_existing_same_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_alias_mine_existing_same_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_unicode_alias_mine_existing_same_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_zulip_original_mine_existing_same_emoji-user_id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_zulip_original_mine_existing_same_emoji-id]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_valid_emoji[remove_zulip_original_mine_existing_same_emoji-None]",
"tests/model/test_model.py::TestModel::test_toggle_message_reaction_with_invalid_emoji",
"tests/model/test_model.py::TestModel::test_has_user_reacted_to_message[id_inside_user_field__user_not_reacted]",
"tests/model/test_model.py::TestModel::test_has_user_reacted_to_message[user_id_inside_user_field__user_has_reacted]",
"tests/model/test_model.py::TestModel::test_has_user_reacted_to_message[no_user_field_with_user_id__user_has_reacted]",
"tests/model/test_model.py::TestModel::test_has_user_reacted_to_message[no_user_field_with_user_id__user_not_reacted]",
"tests/model/test_model.py::TestModel::test_send_typing_status_by_user_ids[start-recipient_user_ids0]",
"tests/model/test_model.py::TestModel::test_send_typing_status_by_user_ids[start-recipient_user_ids1]",
"tests/model/test_model.py::TestModel::test_send_typing_status_by_user_ids[stop-recipient_user_ids0]",
"tests/model/test_model.py::TestModel::test_send_typing_status_by_user_ids[stop-recipient_user_ids1]",
"tests/model/test_model.py::TestModel::test_send_typing_status_with_no_recipients[start]",
"tests/model/test_model.py::TestModel::test_send_typing_status_with_no_recipients[stop]",
"tests/model/test_model.py::TestModel::test_send_private_message[recipients0-response0-True]",
"tests/model/test_model.py::TestModel::test_send_private_message[recipients0-response1-False]",
"tests/model/test_model.py::TestModel::test_send_private_message[recipients1-response0-True]",
"tests/model/test_model.py::TestModel::test_send_private_message[recipients1-response1-False]",
"tests/model/test_model.py::TestModel::test_send_private_message_with_no_recipients",
"tests/model/test_model.py::TestModel::test_send_stream_message[response0-True]",
"tests/model/test_model.py::TestModel::test_send_stream_message[response1-False]",
"tests/model/test_model.py::TestModel::test_update_private_message[response0-True]",
"tests/model/test_model.py::TestModel::test_update_private_message[response1-False]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req0-Some",
"tests/model/test_model.py::TestModel::test_update_stream_message[req1-Old",
"tests/model/test_model.py::TestModel::test_update_stream_message[req2-Old",
"tests/model/test_model.py::TestModel::test_update_stream_message[req3-terminal-False-response0-True]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req3-terminal-False-response1-False]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req4-greet-True-response0-True]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req4-greet-True-response1-False]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req5-lets_party-True-response0-True]",
"tests/model/test_model.py::TestModel::test_update_stream_message[req5-lets_party-True-response1-False]",
"tests/model/test_model.py::TestModel::test_success_get_messages",
"tests/model/test_model.py::TestModel::test_modernize_message_response[Zulip_4.0+_ZFL46_response_with_topic_links]",
"tests/model/test_model.py::TestModel::test_modernize_message_response[Zulip_3.0+_ZFL1_response_with_topic_links]",
"tests/model/test_model.py::TestModel::test_modernize_message_response[Zulip_3.0+_ZFL1_response_empty_topic_links]",
"tests/model/test_model.py::TestModel::test_modernize_message_response[Zulip_2.1+_response_with_subject_links]",
"tests/model/test_model.py::TestModel::test_modernize_message_response[Zulip_2.1+_response_empty_subject_links]",
"tests/model/test_model.py::TestModel::test__store_content_length_restrictions[Zulip_2.1.x_ZFL_None_no_restrictions]",
"tests/model/test_model.py::TestModel::test__store_content_length_restrictions[Zulip_3.1.x_ZFL_27_no_restrictions]",
"tests/model/test_model.py::TestModel::test__store_content_length_restrictions[Zulip_4.0.x_ZFL_52_no_restrictions]",
"tests/model/test_model.py::TestModel::test__store_content_length_restrictions[Zulip_4.0.x_ZFL_53_with_restrictions]",
"tests/model/test_model.py::TestModel::test_get_message_false_first_anchor",
"tests/model/test_model.py::TestModel::test_fail_get_messages",
"tests/model/test_model.py::TestModel::test_fetch_raw_message_content[response0-Feels",
"tests/model/test_model.py::TestModel::test_fetch_raw_message_content[response1-None-True]",
"tests/model/test_model.py::TestModel::test_toggle_stream_muted_status[muting_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_muted_status[unmuting_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_muted_status[first_muted_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_muted_status[last_unmuted_205]",
"tests/model/test_model.py::TestModel::test_toggle_message_star_status[flags_before0-add]",
"tests/model/test_model.py::TestModel::test_toggle_message_star_status[flags_before1-remove]",
"tests/model/test_model.py::TestModel::test_toggle_message_star_status[flags_before2-add]",
"tests/model/test_model.py::TestModel::test_toggle_message_star_status[flags_before3-remove]",
"tests/model/test_model.py::TestModel::test_toggle_message_star_status[flags_before4-remove]",
"tests/model/test_model.py::TestModel::test_mark_message_ids_as_read",
"tests/model/test_model.py::TestModel::test_mark_message_ids_as_read_empty_message_view",
"tests/model/test_model.py::TestModel::test__update_initial_data",
"tests/model/test_model.py::TestModel::test__update_initial_data_raises_exception",
"tests/model/test_model.py::TestModel::test__group_info_from_realm_user_groups",
"tests/model/test_model.py::TestModel::test_get_user_info[user_full_name]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_empty_full_name]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_email]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_empty_email]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_date_joined]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_empty_date_joined]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_timezone]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_empty_timezone]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_bot_type]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_empty_bot_type]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_owner:Zulip_4.0+_ZFL59]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_admin:Zulip_4.0+_ZFL59]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_moderator:Zulip_4.0+_ZFL60]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_guest:Zulip_4.0+_ZFL59]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_member]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_owner:Zulip_3.0+]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_admin:preZulip_4.0]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_guest:preZulip_4.0]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_is_bot]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_bot_has_owner:Zulip_3.0+_ZFL1]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_bot_has_owner:preZulip_3.0]",
"tests/model/test_model.py::TestModel::test_get_user_info[user_bot_has_no_owner]",
"tests/model/test_model.py::TestModel::test_get_user_info_USER_NOT_FOUND",
"tests/model/test_model.py::TestModel::test_get_user_info_sample_response",
"tests/model/test_model.py::TestModel::test_get_all_users",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled0-muted0]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled0-muted1]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled0-muted2]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled0-muted3]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled1-muted0]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled1-muted1]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled1-muted2]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled1-muted3]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled2-muted0]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled2-muted1]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled2-muted2]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled2-muted3]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled3-muted0]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled3-muted1]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled3-muted2]",
"tests/model/test_model.py::TestModel::test__subscribe_to_streams[visual_notification_enabled3-muted3]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_Falsey_log[stream_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_Falsey_log[pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_Falsey_log[group_pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_valid_log[stream_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_valid_log[pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_valid_log[group_pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_flags[stream_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_flags[pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event_with_flags[group_pm_message]",
"tests/model/test_model.py::TestModel::test__handle_message_event[stream_to_all_messages]",
"tests/model/test_model.py::TestModel::test__handle_message_event[private_to_all_private]",
"tests/model/test_model.py::TestModel::test__handle_message_event[stream_to_stream]",
"tests/model/test_model.py::TestModel::test__handle_message_event[stream_to_topic]",
"tests/model/test_model.py::TestModel::test__handle_message_event[stream_to_different_stream_same_topic]",
"tests/model/test_model.py::TestModel::test__handle_message_event[user_pm_x_appears_in_narrow_with_x]",
"tests/model/test_model.py::TestModel::test__handle_message_event[search]",
"tests/model/test_model.py::TestModel::test__handle_message_event[user_pm_x_does_not_appear_in_narrow_without_x]",
"tests/model/test_model.py::TestModel::test__handle_message_event[mentioned_msg_in_mentioned_msg_narrow]",
"tests/model/test_model.py::TestModel::test__update_topic_index[reorder_topic3]",
"tests/model/test_model.py::TestModel::test__update_topic_index[topic1_discussion_continues]",
"tests/model/test_model.py::TestModel::test__update_topic_index[new_topic4]",
"tests/model/test_model.py::TestModel::test__update_topic_index[first_topic_1]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[stream_message-not_notified_since_self_message]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[stream_message-notified_stream_and_private_since_directly_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[stream_message-notified_stream_and_private_since_wildcard_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[stream_message-notified_stream_since_stream_has_desktop_notifications]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[stream_message-notified_private_since_private_message]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[pm_message-not_notified_since_self_message]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[pm_message-notified_stream_and_private_since_directly_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[pm_message-notified_stream_and_private_since_wildcard_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[pm_message-notified_stream_since_stream_has_desktop_notifications]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[pm_message-notified_private_since_private_message]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[group_pm_message-not_notified_since_self_message]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[group_pm_message-notified_stream_and_private_since_directly_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[group_pm_message-notified_stream_and_private_since_wildcard_mentioned]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[group_pm_message-notified_stream_since_stream_has_desktop_notifications]",
"tests/model/test_model.py::TestModel::test_notify_users_calling_msg_type[group_pm_message-notified_private_since_private_message]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[stream_message-True-True]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[stream_message-False-False]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[pm_message-True-True]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[pm_message-False-False]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[group_pm_message-True-True]",
"tests/model/test_model.py::TestModel::test_notify_users_enabled[group_pm_message-False-False]",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[Only",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[Subject",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[Message",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[Both",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[Some",
"tests/model/test_model.py::TestModel::test__handle_update_message_event[message_id",
"tests/model/test_model.py::TestModel::test__update_rendered_view[msgbox_updated_in_topic_narrow]",
"tests/model/test_model.py::TestModel::test__update_rendered_view[msgbox_removed_due_to_topic_narrow_mismatch]",
"tests/model/test_model.py::TestModel::test__update_rendered_view[msgbox_updated_in_all_messages_narrow]",
"tests/model/test_model.py::TestModel::test__update_rendered_view_change_narrow[same_topic_narrow]",
"tests/model/test_model.py::TestModel::test__update_rendered_view_change_narrow[previous_topic_narrow_empty_so_change_narrow]",
"tests/model/test_model.py::TestModel::test__update_rendered_view_change_narrow[same_all_messages_narrow]",
"tests/model/test_model.py::TestModel::test__handle_reaction_event_for_msg_in_index[add-2]",
"tests/model/test_model.py::TestModel::test__handle_reaction_event_for_msg_in_index[remove-1]",
"tests/model/test_model.py::TestModel::test_update_star_status_no_index[update_message_flags_operation0]",
"tests/model/test_model.py::TestModel::test_update_star_status_no_index[update_message_flags_operation1]",
"tests/model/test_model.py::TestModel::test_update_star_status_no_index[update_message_flags_operation2]",
"tests/model/test_model.py::TestModel::test_update_star_status_invalid_operation[update_message_flags_operation0]",
"tests/model/test_model.py::TestModel::test_update_star_status_invalid_operation[update_message_flags_operation1]",
"tests/model/test_model.py::TestModel::test_update_star_status_invalid_operation[update_message_flags_operation2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-add-1-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation0-remove--1-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-add-1-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation1-remove--1-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-add-1-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_star_status[update_message_flags_operation2-remove--1-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-add-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation0-remove-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-add-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation1-remove-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before0-flags_after0-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before1-flags_after1-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before2-flags_after2-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-add-flags_before3-flags_after3-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before4-flags_after4-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before5-flags_after5-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before6-flags_after6-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before7-flags_after7-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids0-indexed_ids0]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids1-indexed_ids1]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids2-indexed_ids2]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids3-indexed_ids3]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids4-indexed_ids4]",
"tests/model/test_model.py::TestModel::test_update_read_status[update_message_flags_operation2-remove-flags_before8-flags_after8-event_message_ids5-indexed_ids5]",
"tests/model/test_model.py::TestModel::test_toggle_stream_pinned_status[pinning]",
"tests/model/test_model.py::TestModel::test_toggle_stream_pinned_status[unpinning]",
"tests/model/test_model.py::TestModel::test_toggle_stream_pinned_status[first_pinned]",
"tests/model/test_model.py::TestModel::test_toggle_stream_pinned_status[last_unpinned]",
"tests/model/test_model.py::TestModel::test_toggle_stream_visual_notifications[visual_notification_enable_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_visual_notifications[visual_notification_disable_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_visual_notifications[first_notification_enable_205]",
"tests/model/test_model.py::TestModel::test_toggle_stream_visual_notifications[last_notification_disable_205]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[not_in_pm_narrow]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[not_in_pm_narrow_with_sender]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_sender_typing:start]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_sender_typing:start_while_animation_in_progress]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_sender_typing:stop]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_other_myself_typing:start]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_other_myself_typing:stop]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_oneself:start]",
"tests/model/test_model.py::TestModel::test__handle_typing_event[in_pm_narrow_with_oneself:stop]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_mute_streams[remove_19]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_mute_streams[add_30]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_pin_streams[pin_stream]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_pin_streams[unpin_stream]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_visual_notifications[remove_visual_notified_stream_15:present]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_visual_notifications[add_visual_notified_stream_19:not_present]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_visual_notifications[remove_visual_notified_stream_15:not_present]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_visual_notifications[add_visual_notified_stream_19:present]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_subscribed_to_stream:ZFLNone]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_subscribed_to_stream:ZFL34]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_subscribed_to_stream:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_subscribed_to_stream:ZFL35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_unsubscribed_from_stream:ZFLNone]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_unsubscribed_from_stream:ZFL34]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_unsubscribed_from_stream:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers[user_unsubscribed_from_stream:ZFL35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_to_unsubscribed_streams[peer_subscribed_to_stream_that_user_is_unsubscribed_to]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_to_unsubscribed_streams[peer_subscribed_to_stream_that_user_is_unsubscribed_to:ZFL35+]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_to_unsubscribed_streams[peer_unsubscribed_from_stream_that_user_is_unsubscribed_to]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_to_unsubscribed_streams[peer_unsubscribed_from_stream_that_user_is_unsubscribed_to:ZFL35+]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_multiple_users_one_stream[users_subscribed_to_stream:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_multiple_users_one_stream[users_subscribed_to_stream:ZFL35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_multiple_users_one_stream[users_unsubscribed_from_stream:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_multiple_users_one_stream[users_unsubscribed_from_stream:ZFL35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_one_user_multiple_streams[user_subscribed_to_streams:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_one_user_multiple_streams[user_subscribed_to_streams:ZFL35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_one_user_multiple_streams[user_unsubscribed_from_streams:ZFL34shouldbe35]",
"tests/model/test_model.py::TestModel::test__handle_subscription_event_subscribers_one_user_multiple_streams[user_unsubscribed_from_streams:ZFL35]",
"tests/model/test_model.py::TestModel::test_update_twenty_four_hour_format[True]",
"tests/model/test_model.py::TestModel::test_update_twenty_four_hour_format[False]",
"tests/model/test_model.py::TestModel::test_is_muted_stream[muted_stream]",
"tests/model/test_model.py::TestModel::test_is_muted_stream[unmuted_stream]",
"tests/model/test_model.py::TestModel::test_is_muted_stream[unmuted_stream_nostreamsmuted]",
"tests/model/test_model.py::TestModel::test_is_visual_notifications_enabled[notifications_enabled]",
"tests/model/test_model.py::TestModel::test_is_visual_notifications_enabled[notifications_disabled]",
"tests/model/test_model.py::TestModel::test_is_visual_notifications_enabled[notifications_disabled_no_streams_to_notify]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:None-topic0-False]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:None-topic1-True]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:None-topic2-True]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:None-topic3-False]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:1-topic0-False]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:1-topic1-True]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:1-topic2-True]",
"tests/model/test_model.py::TestModel::test_is_muted_topic[zulip_feature_level:1-topic3-False]",
"tests/model/test_model.py::TestModel::test_is_user_subscribed_to_stream[subscribed_stream]",
"tests/model/test_model.py::TestModel::test_is_user_subscribed_to_stream[unsubscribed_stream]",
"tests/model/test_model.py::TestModel::test_fetch_message_history_success[unedited_message-response0]",
"tests/model/test_model.py::TestModel::test_fetch_message_history_success[edited_message-response0]",
"tests/model/test_model.py::TestModel::test_fetch_message_history_error[response0]",
"tests/model/test_model.py::TestModel::test_user_name_from_id_valid[1001-Human",
"tests/model/test_model.py::TestModel::test_user_name_from_id_invalid[-1]",
"tests/model/test_model.py::TestModel::test_generate_all_emoji_data",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_with_same_name_as_unicode_emoji_added]",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_with_same_name_as_unicode_emoji_removed]",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_with_name_as_zulip_added]",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_with_name_as_zulip_removed]",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_added]",
"tests/model/test_model.py::TestModel::test__handle_update_emoji_event[realm_emoji_removed]",
"tests/model/test_model.py::TestModel::test_poll_for_events__no_disconnect",
"tests/model/test_model.py::TestModel::test_poll_for_events__reconnect_ok[reconnect_on_1st_attempt]",
"tests/model/test_model.py::TestModel::test_poll_for_events__reconnect_ok[reconnect_on_2nd_attempt]",
"tests/model/test_model.py::TestModel::test_poll_for_events__reconnect_ok[reconnect_on_3rd_attempt]"
] | [] | Apache License 2.0 | 12,389 | 157 | [
"zulipterminal/model.py"
] |
|
coin-or__pulp-530 | 62f3d08d7f2899e5b941a9efbcc616303a6c21df | 2022-03-12 09:28:55 | 5e025f057b8f5f824c53caae2cb6f3d455288d61 | CLAassistant: [](https://cla-assistant.io/coin-or/pulp?pullRequest=530) <br/>Thank you for your submission! We really appreciate it. Like many open source projects, we ask that you sign our [Contributor License Agreement](https://cla-assistant.io/coin-or/pulp?pullRequest=530) before we can accept your contribution.<br/><hr/>**dimitrod** seems not to be a GitHub user. You need a GitHub account to be able to sign the CLA. If you have already a GitHub account, please [add the email address used for this commit to your account](https://help.github.com/articles/why-are-my-commits-linked-to-the-wrong-user/#commits-are-not-linked-to-any-user).<br/><sub>You have signed the CLA already but the status is still pending? Let us [recheck](https://cla-assistant.io/check/coin-or/pulp?pullRequest=530) it.</sub>
pchtsp: thanks! | diff --git a/pulp/mps_lp.py b/pulp/mps_lp.py
index d9ddb11..9cd3932 100644
--- a/pulp/mps_lp.py
+++ b/pulp/mps_lp.py
@@ -183,6 +183,8 @@ def readMPSSetBounds(line, variable_dict):
def readMPSSetRhs(line, constraintsDict):
constraintsDict[line[1]]["constant"] = -float(line[2])
+ if len(line) == 5: # read fields 5, 6
+ constraintsDict[line[3]]["constant"] = -float(line[4])
return
| Incorrect MPS import
Details for the issue
--------------------
Consider the MPS file (taken from [here](http://lpsolve.sourceforge.net/5.5/mps-format.htm))
```
NAME TESTPROB
ROWS
N COST
L LIM1
G LIM2
E MYEQN
COLUMNS
XONE COST 1 LIM1 1
XONE LIM2 1
YTWO COST 4 LIM1 1
YTWO MYEQN -1
ZTHREE COST 9 LIM2 1
ZTHREE MYEQN 1
RHS
RHS1 LIM1 5 LIM2 10
RHS1 MYEQN 7
BOUNDS
UP BND1 XONE 4
LO BND1 YTWO -1
UP BND1 YTWO 1
ENDATA
```
#### What did you do?
Executed `_, problem = pulp.LpProblem.fromMPS('test.mps')`
#### What did you expect to see?
I expected the `LIM2` constraint to be `LIM2: XONE + ZTHREE >= 10`
#### What did you see instead?
```
TESTPROB:
MINIMIZE
1.0*XONE + 4.0*YTWO + 9.0*ZTHREE + 0
SUBJECT TO
LIM1: XONE + YTWO <= 5
LIM2: XONE + ZTHREE >= 0
MYEQN: - YTWO + ZTHREE = 7
VARIABLES
XONE <= 4 Continuous
-1 <= YTWO <= 1 Continuous
ZTHREE Continuous
```
In my tests, any MPS file whose `RHS` section defines values in fields 5 & 6 is imported incorrectly. The following modification of the MPS file is imported correctly (I simply moved the `LIM2` RHS specification onto its own line):
```
NAME TESTPROB
ROWS
N COST
L LIM1
G LIM2
E MYEQN
COLUMNS
XONE COST 1 LIM1 1
XONE LIM2 1
YTWO COST 4 LIM1 1
YTWO MYEQN -1
ZTHREE COST 9 LIM2 1
ZTHREE MYEQN 1
RHS
RHS1 LIM1 5
RHS1 LIM2 10
RHS1 MYEQN 7
BOUNDS
UP BND1 XONE 4
LO BND1 YTWO -1
UP BND1 YTWO 1
ENDATA
```
I tested with [pysmps](https://github.com/jmaerte/pysmps) and it imports both versions of the MPS file correctly.
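A quick way to confirm the behaviour of the fix above (a sketch, not part of the original report: it assumes the first MPS example has been saved as `test.mps` and a pulp build that includes the patch; the `constant == -10` check mirrors the accompanying test, since pulp stores a constraint's RHS negated):

```python
# Sketch: check that fields 5-6 of the "RHS1  LIM1  5  LIM2  10" line are read.
import pulp

_, problem = pulp.LpProblem.fromMPS("test.mps")
print(problem)

# pulp keeps the RHS negated on the constraint's `constant` attribute,
# so LIM2 (">= 10") should carry constant == -10 once fields 5-6 are parsed.
assert problem.constraints["LIM2"].constant == -10
```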
Useful extra information
-------------------------
#### What operating system are you using?
Linux: (Fedora)
#### I'm using python version:
`3.10.2`
#### I installed PuLP via:
- [X] pypi (python -m pip install pulp)
- [X] github (python -m pip install -U git+https://github.com/coin-or/pulp)
#### Did you also
- [X] Tried out the latest github version: https://github.com/coin-or/pulp
- [X] Searched for an existing similar issue: https://github.com/coin-or/pulp/issues?utf8=%E2%9C%93&q=is%3Aissue%20
It seems related to #459 | coin-or/pulp | diff --git a/pulp/tests/test_pulp.py b/pulp/tests/test_pulp.py
index 059de67..b7d476c 100644
--- a/pulp/tests/test_pulp.py
+++ b/pulp/tests/test_pulp.py
@@ -1,6 +1,9 @@
"""
Tests for pulp
"""
+import os
+import tempfile
+
from pulp.constants import PulpError
from pulp.apis import *
from pulp import LpVariable, LpProblem, lpSum, LpConstraintVar, LpFractionConstraint
@@ -9,6 +12,29 @@ from pulp.tests.bin_packing_problem import create_bin_packing_problem
from pulp.utilities import makeDict
import unittest
+# from: http://lpsolve.sourceforge.net/5.5/mps-format.htm
+EXAMPLE_MPS_RHS56 = """NAME TESTPROB
+ROWS
+ N COST
+ L LIM1
+ G LIM2
+ E MYEQN
+COLUMNS
+ XONE COST 1 LIM1 1
+ XONE LIM2 1
+ YTWO COST 4 LIM1 1
+ YTWO MYEQN -1
+ ZTHREE COST 9 LIM2 1
+ ZTHREE MYEQN 1
+RHS
+ RHS1 LIM1 5 LIM2 10
+ RHS1 MYEQN 7
+BOUNDS
+ UP BND1 XONE 4
+ LO BND1 YTWO -1
+ UP BND1 YTWO 1
+ENDATA
+"""
def dumpTestProblem(prob):
try:
@@ -1079,6 +1105,14 @@ class BaseSolverTest:
print("\t Testing reading MPS files - binary variable, no constraint names")
self.assertDictEqual(_dict1, _dict2)
+ def test_importMPS_RHS_fields56(self):
+ """Import MPS file with RHS definitions in fields 5 & 6."""
+ with tempfile.NamedTemporaryFile(delete=False) as h:
+ h.write(str.encode(EXAMPLE_MPS_RHS56))
+ _, problem = LpProblem.fromMPS(h.name)
+ os.unlink(h.name)
+ self.assertEqual(problem.constraints['LIM2'].constant, -10)
+
# def test_importMPS_2(self):
# name = self._testMethodName
# # filename = name + ".mps"
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
pre-commit==2.12.0
-e git+https://github.com/coin-or/pulp.git@62f3d08d7f2899e5b941a9efbcc616303a6c21df#egg=PuLP
Pygments==2.19.1
pytest==8.3.5
PyYAML==6.0.2
requests==2.32.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
toml==0.10.2
tomli==2.2.1
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: pulp
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==2.12.0
- pygments==2.19.1
- pytest==8.3.5
- pyyaml==6.0.2
- requests==2.32.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- toml==0.10.2
- tomli==2.2.1
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/pulp
| [
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_importMPS_RHS_fields56"
] | [] | [
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_LpVariable_indexs_deprecation_logic",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_LpVariable_indexs_param",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_LpVariable_indices_param",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_assignInvalidStatus",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_dict_LP",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_dict_LP_no_obj",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_dict_MIP",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_dict_max",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_json_LP",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_solver_dict_LP",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_export_solver_json",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_false_constraint",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_importMPS_binary",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_importMPS_integer",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_importMPS_maximize",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_infeasible_problem__is_not_valid",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_invalid_var_names",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_logPath",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_makeDict_behavior",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_makeDict_default_value",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_measuring_solving_time",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulpTestAll",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_001",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_009",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_010",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_011",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_012",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_013",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_014",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_015",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_016",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_017",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_018",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_019",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_020",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_021",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_022",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_023",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_030",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_040",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_050",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_060",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_061",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_070",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_075",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_080",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_090",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_100",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_110",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_120",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_121",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_122",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_pulp_123",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_timeLimit",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_unbounded_problem__is_not_valid",
"pulp/tests/test_pulp.py::PULP_CBC_CMDTest::test_unset_objective_value__is_valid"
] | [] | MIT License | 12,399 | 154 | [
"pulp/mps_lp.py"
] |
sqlfluff__sqlfluff-2849 | 0bbd70f38a3318b9a488d988d06e8005e222d6ac | 2022-03-12 21:48:15 | 466b2b988d87c438ece05d6a9b552e11449592b3 | codecov[bot]: # [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) Report
> Merging [#2849](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (f158da4) into [main](https://codecov.io/gh/sqlfluff/sqlfluff/commit/918af576ea009ce70c88ef6f0bab86de2992f511?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) (918af57) will **not change** coverage.
> The diff coverage is `100.00%`.
[](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
```diff
@@ Coverage Diff @@
## main #2849 +/- ##
=========================================
Coverage 100.00% 100.00%
=========================================
Files 163 163
Lines 12415 12428 +13
=========================================
+ Hits 12415 12428 +13
```
| [Impacted Files](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff) | Coverage Δ | |
|---|---|---|
| [src/sqlfluff/core/templaters/base.py](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff#diff-c3JjL3NxbGZsdWZmL2NvcmUvdGVtcGxhdGVycy9iYXNlLnB5) | `100.00% <100.00%> (ø)` | |
| [src/sqlfluff/core/templaters/slicers/tracer.py](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff#diff-c3JjL3NxbGZsdWZmL2NvcmUvdGVtcGxhdGVycy9zbGljZXJzL3RyYWNlci5weQ==) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Last update [918af57...f158da4](https://codecov.io/gh/sqlfluff/sqlfluff/pull/2849?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=sqlfluff).
| diff --git a/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py b/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
index b4cdfaf67..61d80cb47 100644
--- a/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
+++ b/plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
@@ -522,17 +522,21 @@ class DbtTemplater(JinjaTemplater):
# sliced_file to reflect the mapping of the added character(s) back
# to the raw SQL.
templated_sql = templated_sql + "\n" * n_trailing_newlines
- sliced_file.append(
- TemplatedFileSlice(
- slice_type="literal",
- source_slice=slice(
- len(source_dbt_sql) - n_trailing_newlines, len(source_dbt_sql)
- ),
- templated_slice=slice(
- len(templated_sql) - n_trailing_newlines, len(templated_sql)
- ),
+ if sliced_file and sliced_file[-1].templated_slice.stop != len(
+ templated_sql
+ ):
+ sliced_file.append(
+ TemplatedFileSlice(
+ slice_type="literal",
+ source_slice=slice(
+ len(source_dbt_sql) - n_trailing_newlines,
+ len(source_dbt_sql),
+ ),
+ templated_slice=slice(
+ len(templated_sql) - n_trailing_newlines, len(templated_sql)
+ ),
+ )
)
- )
return (
TemplatedFile(
source_str=source_dbt_sql,
diff --git a/src/sqlfluff/core/templaters/base.py b/src/sqlfluff/core/templaters/base.py
index 5ed8b6542..938d823a0 100644
--- a/src/sqlfluff/core/templaters/base.py
+++ b/src/sqlfluff/core/templaters/base.py
@@ -73,6 +73,7 @@ class TemplatedFile:
templated_str: Optional[str] = None,
sliced_file: Optional[List[TemplatedFileSlice]] = None,
raw_sliced: Optional[List[RawFileSlice]] = None,
+ check_consistency=True,
):
"""Initialise the TemplatedFile.
@@ -104,6 +105,36 @@ class TemplatedFile:
self._source_newlines = list(iter_indices_of_newlines(self.source_str))
self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))
+ # NOTE: The "check_consistency" flag should always be True when using
+ # SQLFluff in real life. This flag was only added because some legacy
+ # templater tests in test/core/templaters/jinja_test.py use hardcoded
+ # test data with issues that will trigger errors here. It would be cool
+ # to fix that data someday. I (Barry H.) started looking into it, but
+ # it was much trickier than I expected, because bits of the same data
+ # are shared across multiple tests.
+ if check_consistency:
+ # Sanity check raw string and slices.
+ pos = 0
+ rfs: RawFileSlice
+ for idx, rfs in enumerate(self.raw_sliced):
+ assert rfs.source_idx == pos
+ pos += len(rfs.raw)
+ assert pos == len(self.source_str)
+
+ # Sanity check templated string and slices.
+ previous_slice = None
+ tfs: Optional[TemplatedFileSlice] = None
+ for idx, tfs in enumerate(self.sliced_file):
+ if previous_slice:
+ assert (
+ tfs.templated_slice.start == previous_slice.templated_slice.stop
+ )
+ else:
+ assert tfs.templated_slice.start == 0
+ previous_slice = tfs
+ if self.sliced_file and templated_str is not None:
+ assert tfs.templated_slice.stop == len(templated_str)
+
@classmethod
def from_string(cls, raw):
"""Create TemplatedFile from a string."""
diff --git a/src/sqlfluff/core/templaters/slicers/tracer.py b/src/sqlfluff/core/templaters/slicers/tracer.py
index 8ce3794b1..5c2aa264d 100644
--- a/src/sqlfluff/core/templaters/slicers/tracer.py
+++ b/src/sqlfluff/core/templaters/slicers/tracer.py
@@ -77,9 +77,6 @@ class JinjaTracer:
except IndexError:
pos2 = len(trace_template_output)
p = trace_template_output[pos1 + 1 : pos2]
- is_set_or_macro = p[:3] == "set"
- if is_set_or_macro:
- p = p[3:]
m_id = regex.match(r"^([0-9a-f]+)(_(\d+))?", p)
if not m_id:
raise ValueError( # pragma: no cover
@@ -98,18 +95,7 @@ class JinjaTracer:
alt_id, content_info, literal = value
target_slice_idx = self.find_slice_index(alt_id)
slice_length = content_info if literal else len(str(content_info))
- if not is_set_or_macro:
- self.move_to_slice(target_slice_idx, slice_length)
- else:
- # If we find output from a {% set %} directive or a macro,
- # record a trace without reading or updating the program
- # counter. Such slices are always treated as "templated"
- # because they are inserted during expansion of templated
- # code (i.e. {% set %} variable or macro defined within the
- # file).
- self.record_trace(
- slice_length, target_slice_idx, slice_type="templated"
- )
+ self.move_to_slice(target_slice_idx, slice_length)
return JinjaTrace(
self.make_template(self.raw_str).render(), self.raw_sliced, self.sliced_file
)
@@ -241,9 +227,17 @@ class JinjaTracer:
idx,
)
)
- self.raw_slice_info[result[-1]] = self.slice_info_for_literal(
- len(raw), "" if set_idx is None else "set"
- )
+ if set_idx is None:
+ rsi = self.slice_info_for_literal(
+ len(raw), "" if set_idx is None else "set"
+ )
+ else:
+ # For "set" blocks, don't generate alternate ID or code.
+ # Sometimes, dbt users use {% set %} blocks to generate
+ # queries that get sent to actual databases, thus causing
+ # errors if we tamper with it.
+ rsi = RawSliceInfo(None, None, [])
+ self.raw_slice_info[result[-1]] = rsi
idx += len(raw)
continue
str_buff += raw
@@ -326,15 +320,20 @@ class JinjaTracer:
# effects, but return a unique slice ID.
if trimmed_content:
assert m_open and m_close
- unique_id = self.next_slice_id()
- unique_alternate_id = unique_id
- prefix = "set" if set_idx is not None else ""
- open_ = m_open.group(1)
- close_ = m_close.group(1)
- alternate_code = (
- f"\0{prefix}{unique_alternate_id} {open_} "
- f"{trimmed_content} {close_}"
- )
+ # For "set" blocks, don't generate alternate ID or
+ # code. Sometimes, dbt users use {% set %} blocks to
+ # generate queries that get sent to actual
+ # databases, thus causing errors if we tamper with
+ # it.
+ if set_idx is None:
+ unique_id = self.next_slice_id()
+ unique_alternate_id = unique_id
+ open_ = m_open.group(1)
+ close_ = m_close.group(1)
+ alternate_code = (
+ f"\0{unique_alternate_id} {open_} "
+ f"{trimmed_content} {close_}"
+ )
if block_type == "block_start" and trimmed_content.split()[0] in (
"macro",
"set",
@@ -343,16 +342,24 @@ class JinjaTracer:
# - {% set variable = value %}
# - {% set variable %}value{% endset %}
# https://jinja.palletsprojects.com/en/2.10.x/templates/#block-assignments
- # When the second format is used, set the variable 'is_set'
+ # When the second format is used, set the variable 'set_idx'
# to a non-None value. This info is used elsewhere, as
# literals inside a {% set %} block require special handling
# during the trace.
trimmed_content_parts = trimmed_content.split(maxsplit=2)
- if len(trimmed_content_parts) <= 2 or not trimmed_content_parts[
- 2
- ].startswith("="):
+ if len(trimmed_content_parts) <= 2 or (
+ not trimmed_content_parts[1].endswith("=")
+ and not trimmed_content_parts[2].startswith("=")
+ ):
set_idx = len(result)
- elif block_type == "block_end" and set_idx is not None:
+ elif (
+ block_type == "block_end"
+ and set_idx is not None
+ and (
+ trimmed_content.startswith("endset")
+ or trimmed_content.startswith("endmacro")
+ )
+ ):
# Exiting a {% set %} block. Clear the indicator variable.
set_idx = None
m = regex.search(r"\s+$", raw, regex.MULTILINE | regex.DOTALL)
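The comments added in the hunks above give the motivation for leaving `{% set %}` bodies untouched: dbt users sometimes build real queries inside such blocks and send them to a live database via `run_query`, so injecting tracer IDs there would ship garbage SQL to the warehouse. A minimal sketch of that scenario, adapted from the accompanying `test_templater_set_block_handling` test (the table name is invented; import paths follow the test suite):

```python
# Sketch adapted from test_templater_set_block_handling: the {% set %} body
# must reach run_query() exactly as written, with no tracer IDs injected.
from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater


def run_query(sql):
    # Pre-fix, the trace render could pass mangled SQL here; post-fix the
    # original query text comes through untouched.
    assert "select 1 from some_hypothetical_table" in sql
    return sql


templater = JinjaTemplater(override_context=dict(run_query=run_query))
template = """{% set my_query %}
select 1 from some_hypothetical_table
{% endset %}
{{ run_query(my_query) }}
"""
rendered, violations = templater.process(
    in_str=template, fname="example.sql", config=FluffConfig()
)
print(str(rendered))
```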
| Lint and fix throw an exception when a Jinja for loop is nested inside a set block
### Search before asking
- [X] I searched the [issues](https://github.com/sqlfluff/sqlfluff/issues) and found no similar issues.
### What Happened
To reproduce the error, create `test.template.sql` with the following contents:
```
{% set whitelisted= [
{'name': 'COL_1'},
{'name': 'COL_2'},
{'name': 'COL_3'}
] %}
{% set some_part_of_the_query %}
{% for col in whitelisted %}
{{col.name}}{{ ", " if not loop.last }}
{% endfor %}
{% endset %}
SELECT {{some_part_of_the_query}}
FROM SOME_TABLE
```
When running lint, I get this error:
```
==== sqlfluff ====
sqlfluff: 0.11.0 python: 3.8.12
implementation: cpython dialect: snowflake
verbosity: 1 templater: jinja
==== readout ====
=== [ path: test.template.sql ] ===
WARNING Unable to lint test.template.sql due to an internal error. Please report this as an issue with your query's contents and stacktrace below!
To hide this warning, add the failing file to .sqlfluffignore
Traceback (most recent call last):
File "lib/python3.8/site-packages/sqlfluff/core/linter/runner.py", line 103, in run
yield partial()
File "lib/python3.8/site-packages/sqlfluff/core/linter/linter.py", line 666, in lint_rendered
parsed = cls.parse_rendered(rendered)
File "lib/python3.8/site-packages/sqlfluff/core/linter/linter.py", line 352, in parse_rendered
tokens, lvs, config = cls._lex_templated_file(
File "lib/python3.8/site-packages/sqlfluff/core/linter/linter.py", line 139, in _lex_templated_file
tokens, lex_vs = lexer.lex(templated_file)
File "lib/python3.8/site-packages/sqlfluff/core/parser/lexer.py", line 321, in lex
segments: Tuple[RawSegment, ...] = self.elements_to_segments(
File "lib/python3.8/site-packages/sqlfluff/core/parser/lexer.py", line 348, in elements_to_segments
source_slice = templated_file.templated_slice_to_source_slice(
File "lib/python3.8/site-packages/sqlfluff/core/templaters/base.py", line 258, in templated_slice_to_source_slice
ts_stop_sf_start, ts_stop_sf_stop = self._find_slice_indices_of_templated_pos(
File "lib/python3.8/site-packages/sqlfluff/core/templaters/base.py", line 177, in _find_slice_indices_of_templated_pos
raise ValueError("Position Not Found")
ValueError: Position Not Found
==== summary ====
violations: 0 status: PASS
All Finished 📜 🎉!
```
This is the rendered query:
```
SELECT
COL_1,
COL_2,
COL_3
FROM SOME_TABLE
```
While experimenting to make this work, I removed the newlines between the selected columns (using Jinja whitespace control), like this:
```
{% set whitelisted= [
{'name': 'COL_1'},
{'name': 'COL_2'},
{'name': 'COL_3'}
] %}
{% set some_part_of_the_query %}
{% for col in whitelisted -%}
{{col.name}}{{ ", " if not loop.last }}
{% endfor -%}
{% endset %}
SELECT {{some_part_of_the_query}}
FROM SOME_TABLE
```
which renders:
```
SELECT
COL_1,
COL_2,
COL_3
FROM SOME_TABLE
```
And this will make the linter pass:
```
==== sqlfluff ====
sqlfluff: 0.11.0 python: 3.8.12
implementation: cpython dialect: snowflake
verbosity: 1 templater: jinja
==== readout ====
=== [ path: test.template.sql ] ===
== [test.template.sql] PASS
==== summary ====
violations: 0 status: PASS
All Finished 📜 🎉!
```
### Expected Behaviour
My expectation is that both lint and fix should pass.
### Observed Behaviour
Right now, lint and fix throw an exception (see the "What Happened" section).
### How to reproduce
Mentioned above.
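For convenience, a programmatic reproduction sketch (not part of the original report; it uses the templater API exercised by the test suite and checks the property whose violation caused the `Position Not Found` error: the last templated slice must end exactly at the end of the rendered string):

```python
# Sketch: feed the failing template to the Jinja templater and check that the
# slices cover the whole rendered output (pre-fix they did not, which later
# raised ValueError("Position Not Found") in the lexer).
from sqlfluff.core import FluffConfig
from sqlfluff.core.templaters import JinjaTemplater

template = """{% set whitelisted= [
    {'name': 'COL_1'},
    {'name': 'COL_2'},
    {'name': 'COL_3'}
] %}
{% set some_part_of_the_query %}
    {% for col in whitelisted %}
    {{col.name}}{{ ", " if not loop.last }}
    {% endfor %}
{% endset %}
SELECT {{some_part_of_the_query}}
FROM SOME_TABLE
"""

templated, _ = JinjaTemplater().process(
    in_str=template, fname="test.template.sql", config=FluffConfig()
)
assert templated.sliced_file[-1].templated_slice.stop == len(str(templated))
```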
### Dialect
snowflake
### Version
sqlfluff, version 0.11.0
### Configuration
[sqlfluff]
verbose = 1
dialect = snowflake
templater = jinja
exclude_rules = L027,L031,L032,L036,L044,L046,L034,L050
output_line_length = 121
sql_file_exts=.sql
[sqlfluff:rules]
tab_space_size = 4
max_line_length = 250
indent_unit = space
comma_style = trailing
allow_scalar = True
single_table_references = consistent
unquoted_identifiers_policy = aliases
[sqlfluff:rules:L042]
forbid_subquery_in = both
[sqlfluff:rules:L010] # Keywords
capitalisation_policy = upper
[sqlfluff:rules:L014]
extended_capitalisation_policy = lower
[sqlfluff:rules:L030] # function names
extended_capitalisation_policy = upper
### Are you willing to work on and submit a PR to address the issue?
- [ ] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/sqlfluff/sqlfluff/blob/main/CODE_OF_CONDUCT.md)
| sqlfluff/sqlfluff | diff --git a/test/core/templaters/base_test.py b/test/core/templaters/base_test.py
index fad410cab..632ac2810 100644
--- a/test/core/templaters/base_test.py
+++ b/test/core/templaters/base_test.py
@@ -134,6 +134,7 @@ def test__templated_file_get_line_pos_of_char_pos(
templated_str=templated_str,
sliced_file=file_slices,
fname="test",
+ check_consistency=False,
)
res_line_no, res_line_pos = file.get_line_pos_of_char_pos(in_charpos)
assert res_line_no == out_line_no
@@ -287,6 +288,7 @@ def test__templated_file_templated_slice_to_source_slice(
for rs in raw_slices
],
fname="test",
+ check_consistency=False,
)
source_slice = file.templated_slice_to_source_slice(in_slice)
literal_test = file.is_source_slice_literal(source_slice)
@@ -303,5 +305,6 @@ def test__templated_file_source_only_slices():
RawFileSlice("b" * 7, "comment", 10),
RawFileSlice("a" * 10, "literal", 17),
],
+ check_consistency=False,
)
assert file.source_only_slices() == [RawFileSlice("b" * 7, "comment", 10)]
diff --git a/test/core/templaters/jinja_test.py b/test/core/templaters/jinja_test.py
index df078ee96..b50ab31bc 100644
--- a/test/core/templaters/jinja_test.py
+++ b/test/core/templaters/jinja_test.py
@@ -370,6 +370,35 @@ def test__templater_jinja_slices(case: RawTemplatedTestCase):
assert actual_rs_source_list == case.expected_raw_sliced__source_list
+def test_templater_set_block_handling():
+ """Test handling of literals in {% set %} blocks.
+
+ Specifically, verify they are not modified in the alternate template.
+ """
+
+ def run_query(sql):
+ # Prior to the bug fix, this assertion failed. This was bad because,
+ # inside JinjaTracer, dbt templates similar to the one in this test
+ # would call the database with funky SQL (including weird strings it
+ # uses internally like: 00000000000000000000000000000002.
+ assert sql == "\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n"
+ return sql
+
+ t = JinjaTemplater(override_context=dict(run_query=run_query))
+ instr = """{% set my_query1 %}
+select 1 from foobarfoobarfoobarfoobar_{{ "dev" }}
+{% endset %}
+{% set my_query2 %}
+{{ my_query1 }}
+{% endset %}
+
+{{ run_query(my_query2) }}
+"""
+ outstr, vs = t.process(in_str=instr, fname="test", config=FluffConfig())
+ assert str(outstr) == "\n\n\n\n\nselect 1 from foobarfoobarfoobarfoobar_dev\n\n\n"
+ assert len(vs) == 0
+
+
def test__templater_jinja_error_variable():
"""Test missing variable error handling in the jinja templater."""
t = JinjaTemplater(override_context=dict(blah="foo"))
@@ -846,6 +875,45 @@ from my_table
("literal", slice(312, 327, None), slice(27, 42, None)),
],
),
+ (
+ # Test for issue 2835. There's no space between "col" and "="
+ """{% set col= "col1" %}
+SELECT {{ col }}
+""",
+ None,
+ [
+ ("block_start", slice(0, 21, None), slice(0, 0, None)),
+ ("literal", slice(21, 29, None), slice(0, 8, None)),
+ ("templated", slice(29, 38, None), slice(8, 12, None)),
+ ("literal", slice(38, 39, None), slice(12, 13, None)),
+ ],
+ ),
+ (
+ # Another test for issue 2835. The {% for %} loop inside the
+ # {% set %} caused JinjaTracer to think the {% set %} ended
+ # at the {% endfor %}
+ """{% set some_part_of_the_query %}
+ {% for col in ["col1"] %}
+ {{col}}
+ {% endfor %}
+{% endset %}
+
+SELECT {{some_part_of_the_query}}
+FROM SOME_TABLE
+""",
+ None,
+ [
+ ("block_start", slice(0, 32, None), slice(0, 0, None)),
+ ("literal", slice(32, 37, None), slice(0, 0, None)),
+ ("block_start", slice(37, 62, None), slice(0, 0, None)),
+ ("block_end", slice(79, 91, None), slice(0, 0, None)),
+ ("literal", slice(91, 92, None), slice(0, 0, None)),
+ ("block_end", slice(92, 104, None), slice(0, 0, None)),
+ ("literal", slice(104, 113, None), slice(0, 9, None)),
+ ("templated", slice(113, 139, None), slice(9, 29, None)),
+ ("literal", slice(139, 156, None), slice(29, 46, None)),
+ ],
+ ),
],
)
def test__templater_jinja_slice_file(raw_file, override_context, result, caplog):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-sugar"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
chardet==5.2.0
click==8.1.8
colorama==0.4.6
coverage==7.8.0
diff_cover==9.2.4
exceptiongroup==1.2.2
execnet==2.1.1
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
packaging==24.2
pathspec==0.12.1
pluggy==1.5.0
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-sugar==1.0.0
pytest-xdist==3.6.1
PyYAML==6.0.2
regex==2024.11.6
-e git+https://github.com/sqlfluff/sqlfluff.git@0bbd70f38a3318b9a488d988d06e8005e222d6ac#egg=sqlfluff
tblib==3.0.0
termcolor==2.5.0
toml==0.10.2
tomli==2.2.1
tqdm==4.67.1
typing_extensions==4.13.0
| name: sqlfluff
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- chardet==5.2.0
- click==8.1.8
- colorama==0.4.6
- coverage==7.8.0
- diff-cover==9.2.4
- exceptiongroup==1.2.2
- execnet==2.1.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- packaging==24.2
- pathspec==0.12.1
- pluggy==1.5.0
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-sugar==1.0.0
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- regex==2024.11.6
- tblib==3.0.0
- termcolor==2.5.0
- toml==0.10.2
- tomli==2.2.1
- tqdm==4.67.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/sqlfluff
| [
"test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices0-0-1-1]",
"test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices1-20-3-1]",
"test/core/templaters/base_test.py::test__templated_file_get_line_pos_of_char_pos[01234\\n6789{{foo}}fo\\nbarss-01234\\n6789x\\nfo\\nbarfss-file_slices2-24-3-5]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice0-out_slice0-True-file_slices0-raw_slices0]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice1-out_slice1-True-file_slices1-raw_slices1]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice2-out_slice2-True-file_slices2-raw_slices2]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice3-out_slice3-False-file_slices3-raw_slices3]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice4-out_slice4-False-file_slices4-raw_slices4]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice5-out_slice5-True-file_slices5-raw_slices5]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice6-out_slice6-True-file_slices6-raw_slices6]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice7-out_slice7-True-file_slices7-raw_slices7]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice8-out_slice8-True-file_slices8-raw_slices8]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice9-out_slice9-True-file_slices9-raw_slices9]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice10-out_slice10-True-file_slices10-raw_slices10]",
"test/core/templaters/base_test.py::test__templated_file_templated_slice_to_source_slice[in_slice11-out_slice11-False-file_slices11-raw_slices11]",
"test/core/templaters/base_test.py::test__templated_file_source_only_slices",
"test/core/templaters/jinja_test.py::test_templater_set_block_handling",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[{%"
] | [] | [
"test/core/templaters/base_test.py::test__indices_of_newlines[-positions0]",
"test/core/templaters/base_test.py::test__indices_of_newlines[foo-positions1]",
"test/core/templaters/base_test.py::test__indices_of_newlines[foo\\nbar-positions2]",
"test/core/templaters/base_test.py::test__indices_of_newlines[\\nfoo\\n\\nbar\\nfoo\\n\\nbar\\n-positions3]",
"test/core/templaters/base_test.py::test__templater_raw",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[100-True-file_slices0-10-11]",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[13-True-file_slices1-0-3]",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[28-True-file_slices2-2-5]",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[12-True-file_slices3-1-3]",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[20-True-file_slices4-2-3]",
"test/core/templaters/base_test.py::test__templated_file_find_slice_indices_of_templated_pos[13-False-file_slices5-0-1]",
"test/core/templaters/jinja_test.py::test__templater_jinja[simple]",
"test/core/templaters/jinja_test.py::test__templater_jinja[unboundlocal_bugfix]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[basic_block]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[strip_left_block]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[strip_both_block]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[basic_data]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[strip_right_data]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[strip_both_data]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[strip_both_comment]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slices[union_all_loop1]",
"test/core/templaters/jinja_test.py::test__templater_jinja_error_variable",
"test/core/templaters/jinja_test.py::test__templater_jinja_error_syntax",
"test/core/templaters/jinja_test.py::test__templater_jinja_error_catastrophic",
"test/core/templaters/jinja_test.py::test__templater_jinja_error_macro_path_does_not_exist",
"test/core/templaters/jinja_test.py::test__templater_jinja_lint_empty",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_a/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_b/jinja-False-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_c_dbt/dbt_builtins-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_c_dbt/var_default-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_e/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_f/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_g_macros/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_h_macros/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_i_raw/raw_tag-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_i_raw/raw_tag_2-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_j_libraries/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_k_config_override_path_macros/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_l_metas/001-False-True]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_l_metas/002-False-True]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_m_libraries_module/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_full[jinja_n_nested_macros/jinja-True-False]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[-result0]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[foo-result1]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[foo",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[SELECT",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_template[{%",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[-None-result0]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[foo-None-result1]",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[SELECT",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[{{",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[SELECT\\n",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[{%-",
"test/core/templaters/jinja_test.py::test__templater_jinja_slice_file[select\\n"
] | [] | MIT License | 12,403 | 2,293 | [
"plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py",
"src/sqlfluff/core/templaters/base.py",
"src/sqlfluff/core/templaters/slicers/tracer.py"
] |
asottile__pyupgrade-611 | d71f806ce2fa0799c8dbe8cdcba7f9d5af2fd167 | 2022-03-13 19:09:43 | e4f7820376aadf1edc5e66a375f010a25f5fb281 | asottile: I think it should maybe just skip in this case (and with `**kwargs`) -- in OP's example it would have broken `their_subprocess_wrapper(universal_newlines=True)`
thoughts? | diff --git a/pyupgrade/_plugins/subprocess_run.py b/pyupgrade/_plugins/subprocess_run.py
index 48affbe..53cc03d 100644
--- a/pyupgrade/_plugins/subprocess_run.py
+++ b/pyupgrade/_plugins/subprocess_run.py
@@ -79,6 +79,7 @@ def visit_Call(
stdout_idx = None
stderr_idx = None
universal_newlines_idx = None
+ skip_universal_newlines_rewrite = False
for n, keyword in enumerate(node.keywords):
if keyword.arg == 'stdout' and is_name_attr(
keyword.value,
@@ -96,7 +97,12 @@ def visit_Call(
stderr_idx = n
elif keyword.arg == 'universal_newlines':
universal_newlines_idx = n
- if universal_newlines_idx is not None:
+ elif keyword.arg == 'text' or keyword.arg is None:
+ skip_universal_newlines_rewrite = True
+ if (
+ universal_newlines_idx is not None and
+ not skip_universal_newlines_rewrite
+ ):
func = functools.partial(
_replace_universal_newlines_with_text,
arg_idx=len(node.args) + universal_newlines_idx,
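As the maintainer comment in the hints suggests, the rewrite is now skipped whenever `text=` or `**kwargs` is already present on the call. A small sketch of the three cases (the commands and the extra keyword are invented for illustration):

```python
# Sketch of the cases the new skip_universal_newlines_rewrite guard separates,
# written as source code that pyupgrade would process.
import subprocess

kwargs = {"capture_output": True}  # hypothetical extra arguments

# Still rewritten to text=True: no conflicting keyword is present.
subprocess.run(["echo", "plain"], universal_newlines=True)

# Left unchanged: rewriting would duplicate the text= keyword.
subprocess.run(["echo", "both"], text=True, universal_newlines=True)

# Left unchanged: **kwargs might already carry text=..., e.g. when called via
# a wrapper like their_subprocess_wrapper(universal_newlines=True).
subprocess.run(["echo", "kwargs"], universal_newlines=True, **kwargs)
```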
| universal_newlines conversion can lead to doubled "text"
If for some reason `universal_newlines` and `text` are both passed to a `subprocess.run()` call, replacing the former with the latter makes the `text` keyword appear twice. I suggest removing `universal_newlines` in that case instead, since `text` takes priority anyway on Python 3.6 and later. | asottile/pyupgrade | diff --git a/tests/features/universal_newlines_to_text_test.py b/tests/features/universal_newlines_to_text_test.py
index 2e7e298..e7acd9e 100644
--- a/tests/features/universal_newlines_to_text_test.py
+++ b/tests/features/universal_newlines_to_text_test.py
@@ -27,6 +27,26 @@ from pyupgrade._main import _fix_plugins
(3, 7),
id='universal_newlines not used',
),
+ pytest.param(
+ 'import subprocess\n'
+ 'subprocess.run(\n'
+ ' ["foo"],\n'
+ ' text=True,\n'
+ ' universal_newlines=True\n'
+ ')\n',
+ (3, 7),
+ id='both text and universal_newlines',
+ ),
+ pytest.param(
+ 'import subprocess\n'
+ 'subprocess.run(\n'
+ ' ["foo"],\n'
+ ' universal_newlines=True,\n'
+ ' **kwargs,\n'
+ ')\n',
+ (3, 7),
+ id='both **kwargs and universal_newlines',
+ ),
),
)
def test_fix_universal_newlines_to_text_noop(s, version):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 2.31 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | covdefaults==2.3.0
coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
-e git+https://github.com/asottile/pyupgrade.git@d71f806ce2fa0799c8dbe8cdcba7f9d5af2fd167#egg=pyupgrade
tokenize_rt==6.1.0
tomli==2.2.1
| name: pyupgrade
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- covdefaults==2.3.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- tokenize-rt==6.1.0
- tomli==2.2.1
prefix: /opt/conda/envs/pyupgrade
| [
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text_noop[both"
] | [] | [
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text_noop[not",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text_noop[run",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text_noop[universal_newlines",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text[subprocess.run",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text[run",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text[universal_newlines",
"tests/features/universal_newlines_to_text_test.py::test_fix_universal_newlines_to_text[with"
] | [] | MIT License | 12,407 | 274 | [
"pyupgrade/_plugins/subprocess_run.py"
] |
python-pillow__Pillow-6134 | 6faebd3ff321e7b2dd780950858301e1b2d76db8 | 2022-03-14 12:22:23 | a6a843e5482345ffcd6cb45a534a11e839fcef43 | diff --git a/src/PIL/Image.py b/src/PIL/Image.py
index 8213f79b1..b471e77ec 100644
--- a/src/PIL/Image.py
+++ b/src/PIL/Image.py
@@ -2286,7 +2286,9 @@ class Image:
else:
save_handler = SAVE[format.upper()]
+ created = False
if open_fp:
+ created = not os.path.exists(filename)
if params.get("append", False):
# Open also for reading ("+"), because TIFF save_all
# writer needs to go back and edit the written data.
@@ -2296,10 +2298,17 @@ class Image:
try:
save_handler(self, fp, filename)
- finally:
- # do what we can to clean up
+ except Exception:
if open_fp:
fp.close()
+ if created:
+ try:
+ os.remove(filename)
+ except PermissionError:
+ pass
+ raise
+ if open_fp:
+ fp.close()
def seek(self, frame):
"""
| Trying to save a (0, 0) image creates an unreadable file
### What did you do?
```python
from PIL import Image
Image.new("RGB",(0,0)).save('test.png') # `.png` or `.jpg` or `.tiff`
```
### What did you expect to happen?
no file should be created on disk, because a `SystemError: tile cannot extend outside image` error is raised.
(https://github.com/python-pillow/Pillow/issues/5931#issuecomment-1007142950 for JPEG; libjpeg does not seem to support a 0x0 size)
### What actually happened?
a `SystemError: tile cannot extend outside image` is thrown
a ~33-byte file is left on disk that cannot be read by Pillow or any other viewer, no matter whether the extension is `.png`, `.jpg` or `.tiff`
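Until the library cleans this up itself, a minimal user-side workaround sketch (the path and the `SystemError` catch come from the report above; adapt as needed) is to delete the stub file whenever `save()` raises:

```python
import os

from PIL import Image

path = "test.jpg"
im = Image.new("RGB", (0, 0))
try:
    im.save(path)
except SystemError as exc:
    # save() already opened/created the file before encoding failed,
    # so an unreadable stub may be left behind; remove it
    if os.path.exists(path):
        os.remove(path)
    print("save failed, partial file removed: {}".format(exc))
```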
### What are your OS, Python and Pillow versions?
* OS: win10 1803
* Python: 3.8.10
* Pillow: 9.0.0 | python-pillow/Pillow | diff --git a/Tests/test_image.py b/Tests/test_image.py
index 2cd858df1..5127feb93 100644
--- a/Tests/test_image.py
+++ b/Tests/test_image.py
@@ -652,6 +652,15 @@ class TestImage:
with warnings.catch_warnings():
im.save(temp_file)
+ def test_no_new_file_on_error(self, tmp_path):
+ temp_file = str(tmp_path / "temp.jpg")
+
+ im = Image.new("RGB", (0, 0))
+ with pytest.raises(SystemError):
+ im.save(temp_file)
+
+ assert not os.path.exists(temp_file)
+
def test_load_on_nonexclusive_multiframe(self):
with open("Tests/images/frozenpond.mpo", "rb") as fp:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 9.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest pytest-cov pytest-timeout",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev libopenjp2-7-dev libimagequant-dev libraqm-dev libxcb1-dev"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
-e git+https://github.com/python-pillow/Pillow.git@6faebd3ff321e7b2dd780950858301e1b2d76db8#egg=Pillow
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-timeout==2.3.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: Pillow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- pytest-cov==6.0.0
- pytest-timeout==2.3.1
prefix: /opt/conda/envs/Pillow
| [
"Tests/test_image.py::TestImage::test_no_new_file_on_error"
] | [] | [
"Tests/test_image.py::TestImage::test_image_modes_success",
"Tests/test_image.py::TestImage::test_image_modes_fail",
"Tests/test_image.py::TestImage::test_exception_inheritance",
"Tests/test_image.py::TestImage::test_sanity",
"Tests/test_image.py::TestImage::test_repr_pretty",
"Tests/test_image.py::TestImage::test_open_formats",
"Tests/test_image.py::TestImage::test_width_height",
"Tests/test_image.py::TestImage::test_invalid_image",
"Tests/test_image.py::TestImage::test_bad_mode",
"Tests/test_image.py::TestImage::test_stringio",
"Tests/test_image.py::TestImage::test_pathlib",
"Tests/test_image.py::TestImage::test_fp_name",
"Tests/test_image.py::TestImage::test_tempfile",
"Tests/test_image.py::TestImage::test_unknown_extension",
"Tests/test_image.py::TestImage::test_internals",
"Tests/test_image.py::TestImage::test_readonly_save",
"Tests/test_image.py::TestImage::test_dump",
"Tests/test_image.py::TestImage::test_comparison_with_other_type",
"Tests/test_image.py::TestImage::test_expand_x",
"Tests/test_image.py::TestImage::test_expand_xy",
"Tests/test_image.py::TestImage::test_getbands",
"Tests/test_image.py::TestImage::test_getchannel_wrong_params",
"Tests/test_image.py::TestImage::test_getchannel",
"Tests/test_image.py::TestImage::test_getbbox",
"Tests/test_image.py::TestImage::test_ne",
"Tests/test_image.py::TestImage::test_alpha_composite",
"Tests/test_image.py::TestImage::test_alpha_inplace",
"Tests/test_image.py::TestImage::test_registered_extensions_uninitialized",
"Tests/test_image.py::TestImage::test_registered_extensions",
"Tests/test_image.py::TestImage::test_effect_mandelbrot",
"Tests/test_image.py::TestImage::test_effect_mandelbrot_bad_arguments",
"Tests/test_image.py::TestImage::test_effect_noise",
"Tests/test_image.py::TestImage::test_effect_spread",
"Tests/test_image.py::TestImage::test_effect_spread_zero",
"Tests/test_image.py::TestImage::test_check_size",
"Tests/test_image.py::TestImage::test_storage_neg",
"Tests/test_image.py::TestImage::test_one_item_tuple",
"Tests/test_image.py::TestImage::test_linear_gradient_wrong_mode",
"Tests/test_image.py::TestImage::test_linear_gradient",
"Tests/test_image.py::TestImage::test_radial_gradient_wrong_mode",
"Tests/test_image.py::TestImage::test_radial_gradient",
"Tests/test_image.py::TestImage::test_register_extensions",
"Tests/test_image.py::TestImage::test_remap_palette",
"Tests/test_image.py::TestImage::test__new",
"Tests/test_image.py::TestImage::test_p_from_rgb_rgba",
"Tests/test_image.py::TestImage::test_no_resource_warning_on_save",
"Tests/test_image.py::TestImage::test_load_on_nonexclusive_multiframe",
"Tests/test_image.py::TestImage::test_exif_jpeg",
"Tests/test_image.py::TestImage::test_exif_webp",
"Tests/test_image.py::TestImage::test_exif_png",
"Tests/test_image.py::TestImage::test_exif_interop",
"Tests/test_image.py::TestImage::test_exif_ifd",
"Tests/test_image.py::TestImage::test_exif_load_from_fp",
"Tests/test_image.py::TestImage::test_zero_tobytes[size0]",
"Tests/test_image.py::TestImage::test_zero_tobytes[size1]",
"Tests/test_image.py::TestImage::test_zero_tobytes[size2]",
"Tests/test_image.py::TestImage::test_categories_deprecation",
"Tests/test_image.py::TestImage::test_constants_deprecation",
"Tests/test_image.py::TestImage::test_overrun[fli_overrun.bin]",
"Tests/test_image.py::TestImage::test_overrun[sgi_overrun.bin]",
"Tests/test_image.py::TestImage::test_overrun[sgi_overrun_expandrow.bin]",
"Tests/test_image.py::TestImage::test_overrun[sgi_overrun_expandrow2.bin]",
"Tests/test_image.py::TestImage::test_overrun[pcx_overrun.bin]",
"Tests/test_image.py::TestImage::test_overrun[pcx_overrun2.bin]",
"Tests/test_image.py::TestImage::test_overrun[ossfuzz-4836216264589312.pcx]",
"Tests/test_image.py::TestImage::test_overrun[01r_00.pcx]",
"Tests/test_image.py::TestImage::test_fli_overrun2",
"Tests/test_image.py::TestRegistry::test_encode_registry",
"Tests/test_image.py::TestRegistry::test_encode_registry_fail"
] | [] | MIT-CMU License | 12,410 | 262 | [
"src/PIL/Image.py"
] |
|
conan-io__conan-10797 | 4a654782ca87804e22e9e9a9fb8741c44212fe48 | 2022-03-16 08:19:43 | 3e1a421412dfadb9334fbbe7759266645bfb58d8 | czoido: Checking the Xcode 13.3 release notes: https://developer.apple.com/documentation/xcode-release-notes/xcode-13_3-release-notes
```
Xcode no longer passes -stdlib=libstdc++ to Clang, because Clang no longer supports that library on Apple platforms.
If your project defines the CLANG_CXX_LIBRARY build setting, remove it because it no longer does anything. (83768231)
```
| diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py
index a0c5bb8c6..94d7075cb 100644
--- a/conans/client/conf/__init__.py
+++ b/conans/client/conf/__init__.py
@@ -37,7 +37,7 @@ _t_default_settings_yml = Template(textwrap.dedent("""
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
- sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1"]
+ sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1", "12.3"]
subsystem: [None, catalyst]
Android:
api_level: ANY
@@ -45,24 +45,25 @@ _t_default_settings_yml = Template(textwrap.dedent("""
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
- "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8",
+ "15.0", "15.1", "15.2", "15.3", "15.4"]
sdk: [None, "iphoneos", "iphonesimulator"]
sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
"13.0", "13.1", "13.2", "13.4", "13.5", "13.6", "13.7",
- "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2"]
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2", "15.4"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
- "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
+ "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1", "8.3", "8.4", "8.5"]
sdk: [None, "watchos", "watchsimulator"]
sdk_version: [None, "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
- "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3"]
+ "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3", "8.5"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
- "15.0", "15.1"]
+ "15.0", "15.1", "15.2", "15.3", "15.4"]
sdk: [None, "appletvos", "appletvsimulator"]
sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
- "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2"]
+ "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2", "15.4"]
FreeBSD:
SunOS:
AIX:
@@ -117,7 +118,7 @@ _t_default_settings_yml = Template(textwrap.dedent("""
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
- version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
+ version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0", "13.1"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
diff --git a/conans/client/manager.py b/conans/client/manager.py
index e2aea0d62..2a5b4df8c 100644
--- a/conans/client/manager.py
+++ b/conans/client/manager.py
@@ -108,6 +108,11 @@ def deps_install(app, ref_or_path, install_folder, base_folder, graph_info, remo
output = conanfile.output if root_node.recipe != RECIPE_VIRTUAL else out
+ if conanfile.info.invalid:
+ msg = "Invalid ID: {}. ".format(conanfile.info.invalid)
+ msg += "Trying to install dependencies, but this configuration will fail to build a package"
+ output.error(msg)
+
if install_folder:
# Write generators
tmp = list(conanfile.generators) # Add the command line specified generators
diff --git a/conans/client/migrations_settings.py b/conans/client/migrations_settings.py
index cded91a8f..5c21c5be7 100644
--- a/conans/client/migrations_settings.py
+++ b/conans/client/migrations_settings.py
@@ -3278,7 +3278,7 @@ os:
Macos:
version: [None, "10.6", "10.7", "10.8", "10.9", "10.10", "10.11", "10.12", "10.13", "10.14", "10.15", "11.0", "12.0", "13.0"]
sdk: [None, "macosx"]
- sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1"]
+ sdk_version: [None, "10.13", "10.14", "10.15", "11.0", "11.1", "11.3", "12.0", "12.1", "12.3"]
subsystem: [None, catalyst]
Android:
api_level: ANY
@@ -3286,24 +3286,25 @@ os:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3",
"11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.1", "13.2", "13.3", "13.4", "13.5", "13.6", "13.7",
- "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8", "15.0", "15.1"]
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7", "14.8",
+ "15.0", "15.1", "15.2", "15.3", "15.4"]
sdk: [None, "iphoneos", "iphonesimulator"]
sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
"13.0", "13.1", "13.2", "13.4", "13.5", "13.6", "13.7",
- "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2"]
+ "14.0", "14.1", "14.2", "14.3", "14.4", "14.5", "15.0", "15.2", "15.4"]
watchOS:
version: ["4.0", "4.1", "4.2", "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
- "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1"]
+ "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "8.0", "8.1", "8.3", "8.4", "8.5"]
sdk: [None, "watchos", "watchsimulator"]
sdk_version: [None, "4.3", "5.0", "5.1", "5.2", "5.3", "6.0", "6.1", "6.2",
- "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3"]
+ "7.0", "7.1", "7.2", "7.4", "8.0", "8.0.1", "8.3", "8.5"]
tvOS:
version: ["11.0", "11.1", "11.2", "11.3", "11.4", "12.0", "12.1", "12.2", "12.3", "12.4",
"13.0", "13.2", "13.3", "13.4", "14.0", "14.2", "14.3", "14.4", "14.5", "14.6", "14.7",
- "15.0", "15.1"]
+ "15.0", "15.1", "15.2", "15.3", "15.4"]
sdk: [None, "appletvos", "appletvsimulator"]
sdk_version: [None, "11.3", "11.4", "12.0", "12.1", "12.2", "12.4",
- "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2"]
+ "13.0", "13.1", "13.2", "13.4", "14.0", "14.2", "14.3", "14.5", "15.0", "15.2", "15.4"]
FreeBSD:
SunOS:
AIX:
@@ -3358,7 +3359,7 @@ compiler:
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20, 23, gnu23]
runtime: [None, MD, MT, MTd, MDd]
apple-clang: &apple_clang
- version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0"]
+ version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1", "10.0", "11.0", "12.0", "13.0", "13.1"]
libcxx: [libstdc++, libc++]
cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17, 20, gnu20]
intel:
| [feature] Support for Xcode 13.3 (currently in beta)
Xcode 13.3 (still in beta) reports its clang version as 13.1.6, so an equivalent of #9642 is needed.
- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
| conan-io/conan | diff --git a/conans/test/integration/package_id/test_validate.py b/conans/test/integration/package_id/test_validate.py
index 71cdb2601..d273fe335 100644
--- a/conans/test/integration/package_id/test_validate.py
+++ b/conans/test/integration/package_id/test_validate.py
@@ -248,7 +248,7 @@ class TestValidate(unittest.TestCase):
self.assertIn("dep/0.1: Invalid ID: Windows not supported", client.out)
self.assertIn("pkg/0.1: Invalid ID: Invalid transitive dependencies", client.out)
- def test_validate_export(self):
+ def test_validate_export_pkg(self):
# https://github.com/conan-io/conan/issues/9797
c = TestClient()
conanfile = textwrap.dedent("""
@@ -262,3 +262,20 @@ class TestValidate(unittest.TestCase):
c.save({"conanfile.py": conanfile})
c.run("export-pkg . test/1.0@", assert_error=True)
assert "Invalid ID: never ever" in c.out
+
+ def test_validate_install(self):
+ # https://github.com/conan-io/conan/issues/10602
+ c = TestClient()
+ conanfile = textwrap.dedent("""
+ from conan import ConanFile
+ from conan.errors import ConanInvalidConfiguration
+
+ class TestConan(ConanFile):
+ def validate(self):
+ raise ConanInvalidConfiguration("never ever")
+ """)
+ c.save({"conanfile.py": conanfile})
+ c.run("install .")
+ assert "conanfile.py: ERROR: Invalid ID: never ever" in c.out
+ assert "Trying to install dependencies, but this configuration will fail to build a package"\
+ in c.out
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.8",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
beautifulsoup4==4.13.3
bottle==0.12.25
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
-e git+https://github.com/conan-io/conan.git@4a654782ca87804e22e9e9a9fb8741c44212fe48#egg=conan
distro==1.6.0
execnet==2.1.1
fasteners==0.19
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==2.1.5
mock==1.3.0
node-semver==0.6.1
packaging==24.2
parameterized==0.9.0
patch-ng==1.17.4
pbr==6.1.1
pluggy==1.5.0
pluginbase==1.0.1
py==1.11.0
Pygments==2.19.1
PyJWT==1.7.1
pytest==6.2.5
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
PyYAML==5.4.1
requests==2.32.3
six==1.16.0
soupsieve==2.6
toml==0.10.2
tqdm==4.67.1
typing_extensions==4.13.0
urllib3==1.26.20
waitress==3.0.0
WebOb==1.8.9
WebTest==2.0.35
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- beautifulsoup4==4.13.3
- bottle==0.12.25
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- distro==1.6.0
- execnet==2.1.1
- fasteners==0.19
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==2.1.5
- mock==1.3.0
- node-semver==0.6.1
- packaging==24.2
- parameterized==0.9.0
- patch-ng==1.17.4
- pbr==6.1.1
- pluggy==1.5.0
- pluginbase==1.0.1
- py==1.11.0
- pygments==2.19.1
- pyjwt==1.7.1
- pytest==6.2.5
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- pyyaml==5.4.1
- requests==2.32.3
- six==1.16.0
- soupsieve==2.6
- toml==0.10.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- waitress==3.0.0
- webob==1.8.9
- webtest==2.0.35
prefix: /opt/conda/envs/conan
| [
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_install"
] | [] | [
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_compatible",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_compatible_also_invalid",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_compatible_also_invalid_fail",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_create",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_export_pkg",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_options",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_package_id_mode",
"conans/test/integration/package_id/test_validate.py::TestValidate::test_validate_requires"
] | [] | MIT License | 12,428 | 4,089 | [
"conans/client/conf/__init__.py",
"conans/client/manager.py",
"conans/client/migrations_settings.py"
] |
python-pillow__Pillow-6140 | 0cf072db39bf1868575c625fc4403f388e5d47d4 | 2022-03-17 12:46:07 | a6a843e5482345ffcd6cb45a534a11e839fcef43 | diff --git a/src/PIL/WebPImagePlugin.py b/src/PIL/WebPImagePlugin.py
index 590161f3e..370b44e3a 100644
--- a/src/PIL/WebPImagePlugin.py
+++ b/src/PIL/WebPImagePlugin.py
@@ -192,7 +192,7 @@ def _save_all(im, fp, filename):
r, g, b = palette[background * 3 : (background + 1) * 3]
background = (r, g, b, 0)
- duration = im.encoderinfo.get("duration", im.info.get("duration"))
+ duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
loop = im.encoderinfo.get("loop", 0)
minimize_size = im.encoderinfo.get("minimize_size", False)
kmin = im.encoderinfo.get("kmin", None)
| WebP plugin: saving an animated WebP needs a default duration value
### What did you do?
I am simply converting an animated GIF to the animated WebP format.
### What did you expect to happen?
the conversion should succeed
### What actually happened?
```
python3.7/site-packages/PIL/WebPImagePlugin.py", line 292, in _save_all
    timestamp += duration
TypeError: unsupported operand type(s) for +=: 'int' and 'NoneType'
```
### What are your OS, Python and Pillow versions?
* OS: macos Monterey version 12.2.1
* Python: 3.7.11
* Pillow: 8.4.0
Through debugging, I found the reason: when Pillow opens the GIF, `im.info.get("duration")` is `None`, and since I convert to WebP without a custom duration parameter, `duration = im.encoderinfo.get("duration", im.info.get("duration"))` [WebPImagePlugin.py line 195] ends up as `NoneType`, which then causes the traceback at WebPImagePlugin.py line 292 in:
```python
# Update timestamp and frame index
if isinstance(duration, (list, tuple)):
timestamp += duration[frame_idx]
else:
timestamp += duration
```
traceback:
``` python
Traceback (most recent call last):
File "/Users/jeevi/Documents/WWW/study/python/pillow_img/img/img.py", line 198, in <module>
im.save(bts, "webP", save_all=True, quality=75, optimize=True)
File "/Users/jeevi/Documents/WWW/study/python/pillow_img/python/lib/python3.7/site-packages/PIL/Image.py", line 2240, in save
save_handler(self, fp, filename)
File "/Users/jeevi/Documents/WWW/study/python/pillow_img/python/lib/python3.7/site-packages/PIL/WebPImagePlugin.py", line 292, in _save_all
timestamp += duration
TypeError: unsupported operand type(s) for +=: 'int' and 'NoneType'
```
**I think `duration` needs a default value when it is not provided.**
here is my code:
```python
import time
from io import BytesIO

from PIL import Image

t = time.time()  # start time for the elapsed measurement below
file = "./YD_cnt_11_019CS3rXgAIN.gif"
with Image.open(file) as im:
    print(im.format, im.size, im.mode, getattr(im, "is_animated", False), getattr(im, "n_frames", 1),
          getattr(im, "n_frames", 1) // 2)
    bts = BytesIO()
    im.save(bts, "webP", save_all=True, quality=75, optimize=True)
    print("length: {} \n".format(len(bts.getvalue())))
    with open("YD_cnt_11_019CS3rXgAIN.gif.webp", "wb") as f:
        f.write(bts.getvalue())
    t2 = time.time()
    cost = int(round(t2 * 1000)) - int(round(t * 1000))
    print("elapsed: {}ms\n".format(cost))
```
The image file is in my GitHub repo: [https://github.com/jeevi-cao/study/blob/master/python/YD_cnt_11_019CS3rXgAIN.gif](https://github.com/jeevi-cao/study/blob/master/python/YD_cnt_11_019CS3rXgAIN.gif)
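As a workaround until the plugin applies a default, passing `duration` explicitly avoids the crash. A sketch, assuming the same input GIF as above (the output filename is arbitrary):

```python
from PIL import Image

with Image.open("./YD_cnt_11_019CS3rXgAIN.gif") as im:
    # fall back to 0 so the WebP writer never receives None
    duration = im.info.get("duration", 0)
    im.save("out.webp", save_all=True, duration=duration, quality=75, optimize=True)
```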
| python-pillow/Pillow | diff --git a/Tests/test_file_webp.py b/Tests/test_file_webp.py
index 051119378..13fbb5291 100644
--- a/Tests/test_file_webp.py
+++ b/Tests/test_file_webp.py
@@ -8,6 +8,7 @@ import pytest
from PIL import Image, WebPImagePlugin, features
from .helper import (
+ assert_image_equal,
assert_image_similar,
assert_image_similar_tofile,
hopper,
@@ -105,6 +106,19 @@ class TestFileWebp:
hopper().save(buffer_method, format="WEBP", method=6)
assert buffer_no_args.getbuffer() != buffer_method.getbuffer()
+ @skip_unless_feature("webp_anim")
+ def test_save_all(self, tmp_path):
+ temp_file = str(tmp_path / "temp.webp")
+ im = Image.new("RGB", (1, 1))
+ im2 = Image.new("RGB", (1, 1), "#f00")
+ im.save(temp_file, save_all=True, append_images=[im2])
+
+ with Image.open(temp_file) as reloaded:
+ assert_image_equal(im, reloaded)
+
+ reloaded.seek(1)
+ assert_image_similar(im2, reloaded, 1)
+
def test_icc_profile(self, tmp_path):
self._roundtrip(tmp_path, self.rgb_mode, 12.5, {"icc_profile": None})
if _webp.HAVE_WEBPANIM:
@@ -171,7 +185,6 @@ class TestFileWebp:
Image.open(blob).load()
Image.open(blob).load()
- @skip_unless_feature("webp")
@skip_unless_feature("webp_anim")
def test_background_from_gif(self, tmp_path):
with Image.open("Tests/images/chi.gif") as im:
@@ -191,7 +204,6 @@ class TestFileWebp:
difference = sum(abs(original_value[i] - reread_value[i]) for i in range(0, 3))
assert difference < 5
- @skip_unless_feature("webp")
@skip_unless_feature("webp_anim")
def test_duration(self, tmp_path):
with Image.open("Tests/images/dispose_bgnd.gif") as im:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 9.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-timeout",
"pytest-reverse"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libtiff-dev libjpeg-dev libopenjp2-7-dev zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk libharfbuzz-dev libfribidi-dev libxcb1-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
-e git+https://github.com/python-pillow/Pillow.git@0cf072db39bf1868575c625fc4403f388e5d47d4#egg=Pillow
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-reverse==1.8.0
pytest-timeout==2.3.1
tomli==2.2.1
| name: Pillow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-reverse==1.8.0
- pytest-timeout==2.3.1
- tomli==2.2.1
prefix: /opt/conda/envs/Pillow
| [
"Tests/test_file_webp.py::TestFileWebp::test_save_all"
] | [] | [
"Tests/test_file_webp.py::TestUnsupportedWebp::test_unsupported",
"Tests/test_file_webp.py::TestFileWebp::test_version",
"Tests/test_file_webp.py::TestFileWebp::test_read_rgb",
"Tests/test_file_webp.py::TestFileWebp::test_write_rgb",
"Tests/test_file_webp.py::TestFileWebp::test_write_method",
"Tests/test_file_webp.py::TestFileWebp::test_icc_profile",
"Tests/test_file_webp.py::TestFileWebp::test_write_unsupported_mode_L",
"Tests/test_file_webp.py::TestFileWebp::test_write_unsupported_mode_P",
"Tests/test_file_webp.py::TestFileWebp::test_write_encoding_error_message",
"Tests/test_file_webp.py::TestFileWebp::test_WebPEncode_with_invalid_args",
"Tests/test_file_webp.py::TestFileWebp::test_WebPDecode_with_invalid_args",
"Tests/test_file_webp.py::TestFileWebp::test_no_resource_warning",
"Tests/test_file_webp.py::TestFileWebp::test_file_pointer_could_be_reused",
"Tests/test_file_webp.py::TestFileWebp::test_background_from_gif",
"Tests/test_file_webp.py::TestFileWebp::test_duration"
] | [] | MIT-CMU License | 12,431 | 214 | [
"src/PIL/WebPImagePlugin.py"
] |
|
john-kurkowski__tldextract-258 | 86c82c3a9e3e9a2980a39ce8b77e1ad0bdebcc12 | 2022-03-17 16:22:58 | 60c6f645884dc9d79a475a031899a85b4cba80da | diff --git a/tldextract/cache.py b/tldextract/cache.py
index b714e77..fbd02c9 100644
--- a/tldextract/cache.py
+++ b/tldextract/cache.py
@@ -7,7 +7,7 @@ import os
import os.path
import sys
from hashlib import md5
-from typing import Callable, Dict, Hashable, List, Optional, TypeVar, Union
+from typing import Callable, Dict, Hashable, Iterable, Optional, TypeVar, Union
from filelock import FileLock
import requests
@@ -166,7 +166,7 @@ class DiskCache:
func: Callable[..., T],
namespace: str,
kwargs: Dict[str, Hashable],
- hashed_argnames: List[str],
+ hashed_argnames: Iterable[str],
) -> T:
"""Get a url but cache the response"""
if not self.enabled:
@@ -203,7 +203,7 @@ class DiskCache:
result: T = self.get(namespace=namespace, key=key_args)
except KeyError:
result = func(**kwargs)
- self.set(namespace="urls", key=key_args, value=result)
+ self.set(namespace=namespace, key=key_args, value=result)
return result
| "update" caches public suffix list to wrong directory
Hi!
First off, I love `tldextract`, thanks for building it!
The way we use `tldextract` is slightly special, but used to be fully supported by the public API. Our docker containers don't have internet access, so when we build them, we cache the latest public suffix list. When our applications use `tldextract`, we configure it so that it uses the cache, and never needs an internet connection.
Upon upgrading to any 3.* version of `tldextract`, I noticed that the cache was no longer being used to look up information from the public suffix list.
Problem reproduction steps
----------------------------
First, run the command: `tldextract --update --private_domains`
Then create a basic test file:
```python
import os
from tldextract import TLDExtract
extractor = TLDExtract(cache_dir=os.environ["TLDEXTRACT_CACHE"])
extractor("www.google.com")
```
Now, create a conditional breakpoint [here](https://github.com/john-kurkowski/tldextract/blob/master/tldextract/cache.py#L93), where the condition is that `namespace` equals `publicsuffix.org-tlds`.
### Expected behaviour
When running the above program, the breakpoint should be hit, but should not throw a `KeyError`.
### Actual behaviour
The breakpoint is hit once during the `__call__(…)`, and immediately throws a `KeyError` because it can't find the cache file.
Explanation
------------
The method `run_and_cache` accepts a namespace, which is used to calculate the cache file path. But when the file is downloaded, it uses the hardcoded namespace "urls", which places the file in the wrong location.
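To make the mismatch easy to see without our docker setup, here is a small self-contained script (the cache directory and names are just for illustration); on the affected 3.* versions the second call misses the cache and re-runs the function, because `run_and_cache` writes with the hardcoded namespace `"urls"` but reads with the namespace it was given:

```python
from tldextract.cache import DiskCache

cache = DiskCache("/tmp/tld-cache-demo")
calls = []

def fetch(value):
    calls.append(value)
    return "result-{}".format(value)

# first call populates the cache, second should be a pure cache hit
cache.run_and_cache(fetch, "demo_namespace", {"value": 1}, ["value"])
cache.run_and_cache(fetch, "demo_namespace", {"value": 1}, ["value"])
print(len(calls))  # 2 on the affected versions, 1 once the namespace is passed through
```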
I'll write a PR that fixes this problem. | john-kurkowski/tldextract | diff --git a/tests/test_cache.py b/tests/test_cache.py
index cb82f4f..3ae30d3 100644
--- a/tests/test_cache.py
+++ b/tests/test_cache.py
@@ -2,7 +2,8 @@
import os.path
import sys
import types
-from typing import Any, cast
+from typing import Any, Dict, Hashable, cast
+from unittest.mock import Mock
import pytest
import tldextract.cache
@@ -72,3 +73,29 @@ def test_get_cache_dir(monkeypatch):
monkeypatch.setenv("TLDEXTRACT_CACHE", "/alt-tld-cache")
assert get_cache_dir() == "/alt-tld-cache"
+
+
+def test_run_and_cache(tmpdir):
+ cache = DiskCache(tmpdir)
+
+ return_value1 = "unique return value"
+ some_fn = Mock(return_value=return_value1)
+ kwargs1: Dict[str, Hashable] = {"value": 1}
+
+ assert some_fn.call_count == 0
+
+ call1 = cache.run_and_cache(some_fn, "test_namespace", kwargs1, kwargs1.keys())
+ assert call1 == return_value1
+ assert some_fn.call_count == 1
+
+ call2 = cache.run_and_cache(some_fn, "test_namespace", kwargs1, kwargs1.keys())
+ assert call2 == return_value1
+ assert some_fn.call_count == 1
+
+ kwargs2: Dict[str, Hashable] = {"value": 2}
+ return_value2 = "another return value"
+ some_fn.return_value = return_value2
+
+ call3 = cache.run_and_cache(some_fn, "test_namespace", kwargs2, kwargs2.keys())
+ assert call3 == return_value2
+ assert some_fn.call_count == 2
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pylint",
"pytest",
"pytest-gitignore",
"pytest-mock",
"responses"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
certifi==2025.1.31
charset-normalizer==3.4.1
dill==0.3.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
mccabe==0.7.0
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pylint==3.3.6
pytest @ file:///croot/pytest_1738938843180/work
pytest-gitignore==1.3
pytest-mock==3.14.0
PyYAML==6.0.2
requests==2.32.3
requests-file==2.1.0
responses==0.25.7
-e git+https://github.com/john-kurkowski/tldextract.git@86c82c3a9e3e9a2980a39ce8b77e1ad0bdebcc12#egg=tldextract
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tomlkit==0.13.2
typing_extensions==4.13.0
urllib3==2.3.0
| name: tldextract
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- certifi==2025.1.31
- charset-normalizer==3.4.1
- dill==0.3.9
- filelock==3.18.0
- idna==3.10
- isort==6.0.1
- mccabe==0.7.0
- platformdirs==4.3.7
- pylint==3.3.6
- pytest-gitignore==1.3
- pytest-mock==3.14.0
- pyyaml==6.0.2
- requests==2.32.3
- requests-file==2.1.0
- responses==0.25.7
- tomlkit==0.13.2
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/tldextract
| [
"tests/test_cache.py::test_run_and_cache"
] | [] | [
"tests/test_cache.py::test_disk_cache",
"tests/test_cache.py::test_get_pkg_unique_identifier",
"tests/test_cache.py::test_get_cache_dir"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,433 | 287 | [
"tldextract/cache.py"
] |
|
pymc-devs__pymc-5614 | b895e40a2272210ee51519fa0431b6170b46b66e | 2022-03-18 13:49:07 | c8525eb1ff771e8a60b0b5022a9938594f48dc18 | codecov[bot]: # [Codecov](https://codecov.io/gh/pymc-devs/pymc/pull/5614?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) Report
> Merging [#5614](https://codecov.io/gh/pymc-devs/pymc/pull/5614?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) (9b7a51d) into [main](https://codecov.io/gh/pymc-devs/pymc/commit/144b0baa1a560cfe1d75ace2a0b8fb70f8a26448?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) (144b0ba) will **decrease** coverage by `40.14%`.
> The diff coverage is `80.00%`.
```diff
@@ Coverage Diff @@
## main #5614 +/- ##
===========================================
- Coverage 87.68% 47.54% -40.15%
===========================================
Files 76 76
Lines 13702 13702
===========================================
- Hits 12014 6514 -5500
- Misses 1688 7188 +5500
```
| [Impacted Files](https://codecov.io/gh/pymc-devs/pymc/pull/5614?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) | Coverage Δ | |
|---|---|---|
| [pymc/distributions/logprob.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9kaXN0cmlidXRpb25zL2xvZ3Byb2IucHk=) | `62.28% <80.00%> (-34.05%)` | :arrow_down: |
| [pymc/ode/utils.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9vZGUvdXRpbHMucHk=) | `15.62% <0.00%> (-84.38%)` | :arrow_down: |
| [pymc/step\_methods/mlda.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zdGVwX21ldGhvZHMvbWxkYS5weQ==) | `12.53% <0.00%> (-83.85%)` | :arrow_down: |
| [pymc/variational/updates.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy92YXJpYXRpb25hbC91cGRhdGVzLnB5) | `11.33% <0.00%> (-80.79%)` | :arrow_down: |
| [pymc/smc/smc.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zbWMvc21jLnB5) | `18.62% <0.00%> (-79.37%)` | :arrow_down: |
| [pymc/gp/gp.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9ncC9ncC5weQ==) | `18.68% <0.00%> (-74.50%)` | :arrow_down: |
| [pymc/tuning/starting.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy90dW5pbmcvc3RhcnRpbmcucHk=) | `19.51% <0.00%> (-73.18%)` | :arrow_down: |
| [pymc/step\_methods/slicer.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9zdGVwX21ldGhvZHMvc2xpY2VyLnB5) | `26.76% <0.00%> (-69.02%)` | :arrow_down: |
| [pymc/gp/cov.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy9ncC9jb3YucHk=) | `29.20% <0.00%> (-68.88%)` | :arrow_down: |
| [pymc/variational/inference.py](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs#diff-cHltYy92YXJpYXRpb25hbC9pbmZlcmVuY2UucHk=) | `19.38% <0.00%> (-67.86%)` | :arrow_down: |
| ... and [48 more](https://codecov.io/gh/pymc-devs/pymc/pull/5614/diff?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=pymc-devs) | |
ricardoV94: Failing test seems unrelated | diff --git a/pymc/distributions/logprob.py b/pymc/distributions/logprob.py
index 1b2846845..bb76f444e 100644
--- a/pymc/distributions/logprob.py
+++ b/pymc/distributions/logprob.py
@@ -24,6 +24,7 @@ from aeppl.logprob import logcdf as logcdf_aeppl
from aeppl.logprob import logprob as logp_aeppl
from aeppl.transforms import TransformValuesOpt
from aesara.graph.basic import graph_inputs, io_toposort
+from aesara.tensor.random.op import RandomVariable
from aesara.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
@@ -223,6 +224,26 @@ def joint_logpt(
tmp_rvs_to_values, extra_rewrites=transform_opt, use_jacobian=jacobian, **kwargs
)
+ # Raise if there are unexpected RandomVariables in the logp graph
+ # Only SimulatorRVs are allowed
+ from pymc.distributions.simulator import SimulatorRV
+
+ unexpected_rv_nodes = [
+ node
+ for node in aesara.graph.ancestors(list(temp_logp_var_dict.values()))
+ if (
+ node.owner
+ and isinstance(node.owner.op, RandomVariable)
+ and not isinstance(node.owner.op, SimulatorRV)
+ )
+ ]
+ if unexpected_rv_nodes:
+ raise ValueError(
+ f"Random variables detected in the logp graph: {unexpected_rv_nodes}.\n"
+ "This can happen when DensityDist logp or Interval transform functions "
+ "reference nonlocal variables."
+ )
+
# aeppl returns the logpt for every single value term we provided to it. This includes
# the extra values we plugged in above, so we filter those we actually wanted in the
# same order they were given in.
| Invalid logp expression if args aren't explicit in DensityDist
## Description of your problem
On the latest pymc version (777622aea4e5edbbc6051c1d1a0d32456e878b59) I get incorrect logp graphs that contain sampled random variables if I use model variables in a closure of a DensityDist:
```python
import numpy as np
import pymc as pm
with pm.Model() as model:
a = pm.Normal("a")
pm.DensityDist("b", logp=lambda x: (x - a) ** 2, observed=np.array(3.))
```
The logp function of the model is not deterministic now, because it uses a *sampled* version of `a` in the logp function of `b` instead of the value from the value variable. You can see this in the aesara graph (the normal_rv op should not be in here):
```python
aesara.dprint(model.logpt)
```
```
Elemwise{add,no_inplace} [id A] '__logp'
|Elemwise{add,no_inplace} [id B] ''
| |Sum{acc_dtype=float64} [id C] ''
| | |TensorConstant{[]} [id D]
| |Sum{acc_dtype=float64} [id E] ''
| |Elemwise{mul,no_inplace} [id F] ''
| |Assert{msg='sigma > 0'} [id G] 'a_logprob'
| | |Elemwise{sub,no_inplace} [id H] ''
| | | |Elemwise{sub,no_inplace} [id I] ''
| | | | |Elemwise{mul,no_inplace} [id J] ''
| | | | | |TensorConstant{-0.5} [id K]
| | | | | |Elemwise{pow,no_inplace} [id L] ''
| | | | | |Elemwise{true_div,no_inplace} [id M] ''
| | | | | | |Elemwise{sub,no_inplace} [id N] ''
| | | | | | | |a [id O]
| | | | | | | |TensorConstant{0} [id P]
| | | | | | |TensorConstant{1.0} [id Q]
| | | | | |TensorConstant{2} [id R]
| | | | |Elemwise{log,no_inplace} [id S] ''
| | | | |TensorConstant{2.5066282746310002} [id T]
| | | |Elemwise{log,no_inplace} [id U] ''
| | | |TensorConstant{1.0} [id Q]
| | |All [id V] ''
| | |Elemwise{gt,no_inplace} [id W] ''
| | |TensorConstant{1.0} [id Q]
| | |TensorConstant{0.0} [id X]
| |TensorConstant{1.0} [id Y]
|Sum{acc_dtype=float64} [id Z] ''
|Elemwise{mul,no_inplace} [id BA] ''
|Elemwise{pow,no_inplace} [id BB] 'b_logprob'
| |Elemwise{sub,no_inplace} [id BC] ''
| | |TensorConstant{3.0} [id BD]
| | |normal_rv{0, (0, 0), floatX, False}.1 [id BE] 'a'
| | |RandomStateSharedVariable(<RandomState(MT19937) at 0x7F660912BC40>) [id BF]
| | |TensorConstant{[]} [id BG]
| | |TensorConstant{11} [id BH]
| | |TensorConstant{0} [id BI]
| | |TensorConstant{1.0} [id BJ]
| |TensorConstant{2} [id BK]
|TensorConstant{1.0} [id BL]
```
This can be fixed in user code by explicitly letting the DensityDist know about the parameter:
```python
with pm.Model() as model2:
a = pm.Normal("a")
pm.DensityDist("b", a, logp=lambda x, a_val: (x - a_val) ** 2, observed=np.array(3.))
```
Finding a bug like this in an actual model took @ferrine and me a couple of hours, so it would help a lot if we could either change `pm.logp` so that this just works, or raise an error when there are remaining RV ops in a logp graph.
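To be concrete about the second option, a rough sketch of the check I have in mind (the helper name and where it hooks in are placeholders, but the graph walk only uses existing aesara APIs):

```python
import aesara
from aesara.tensor.random.op import RandomVariable

def assert_no_rvs(logp_terms):
    # logp_terms: the value-substituted logp expressions returned for the model
    leftover = [
        var
        for var in aesara.graph.ancestors(list(logp_terms))
        if var.owner and isinstance(var.owner.op, RandomVariable)
    ]
    if leftover:
        raise ValueError(
            "Random variables detected in the logp graph: {}. "
            "This can happen when a DensityDist logp references "
            "nonlocal model variables.".format(leftover)
        )
```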
(cc @ricardoV94) | pymc-devs/pymc | diff --git a/pymc/tests/test_logprob.py b/pymc/tests/test_logprob.py
index 1afd513a0..f6ff39a2c 100644
--- a/pymc/tests/test_logprob.py
+++ b/pymc/tests/test_logprob.py
@@ -28,6 +28,7 @@ from aesara.tensor.subtensor import (
Subtensor,
)
+from pymc import DensityDist
from pymc.aesaraf import floatX, walk_model
from pymc.distributions.continuous import HalfFlat, Normal, TruncatedNormal, Uniform
from pymc.distributions.discrete import Bernoulli
@@ -217,3 +218,12 @@ def test_model_unchanged_logprob_access():
model.logpt()
new_inputs = set(aesara.graph.graph_inputs([c]))
assert original_inputs == new_inputs
+
+
+def test_unexpected_rvs():
+ with Model() as model:
+ x = Normal("x")
+ y = DensityDist("y", logp=lambda *args: x)
+
+ with pytest.raises(ValueError, match="^Random variables detected in the logp graph"):
+ model.logpt()
diff --git a/pymc/tests/test_parallel_sampling.py b/pymc/tests/test_parallel_sampling.py
index 78dad2c68..d321ef868 100644
--- a/pymc/tests/test_parallel_sampling.py
+++ b/pymc/tests/test_parallel_sampling.py
@@ -201,11 +201,11 @@ def test_spawn_densitydist_bound_method():
N = 100
with pm.Model() as model:
mu = pm.Normal("mu", 0, 1)
- normal_dist = pm.Normal.dist(mu, 1, size=N)
- def logp(x):
+ def logp(x, mu):
+ normal_dist = pm.Normal.dist(mu, 1, size=N)
out = pm.logp(normal_dist, x)
return out
- obs = pm.DensityDist("density_dist", logp=logp, observed=np.random.randn(N), size=N)
+ obs = pm.DensityDist("density_dist", mu, logp=logp, observed=np.random.randn(N), size=N)
pm.sample(draws=10, tune=10, step=pm.Metropolis(), cores=2, mp_ctx="spawn")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_git_commit_hash",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 4.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.10",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aeppl==0.0.27
aesara==2.5.1
arviz==0.21.0
cachetools==5.5.2
cloudpickle==3.1.1
cons==0.4.6
contourpy==1.3.1
coverage==7.8.0
cycler==0.12.1
etuples==0.3.9
exceptiongroup==1.2.2
fastprogress==1.0.3
filelock==3.18.0
fonttools==4.56.0
h5netcdf==1.6.1
h5py==3.13.0
iniconfig==2.1.0
kiwisolver==1.4.8
logical-unification==0.4.6
matplotlib==3.10.1
miniKanren==1.0.3
multipledispatch==1.0.0
numpy==2.2.4
packaging==24.2
pandas==2.2.3
pillow==11.1.0
pluggy==1.5.0
-e git+https://github.com/pymc-devs/pymc.git@b895e40a2272210ee51519fa0431b6170b46b66e#egg=pymc
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.15.2
six==1.17.0
tomli==2.2.1
toolz==1.0.0
typing_extensions==4.13.0
tzdata==2025.2
xarray==2025.3.1
xarray-einstats==0.8.0
| name: pymc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- bzip2=1.0.8=h5eee18b_6
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libuuid=1.41.5=h5eee18b_0
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py310h06a4308_0
- python=3.10.16=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py310h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py310h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aeppl==0.0.27
- aesara==2.5.1
- arviz==0.21.0
- cachetools==5.5.2
- cloudpickle==3.1.1
- cons==0.4.6
- contourpy==1.3.1
- coverage==7.8.0
- cycler==0.12.1
- etuples==0.3.9
- exceptiongroup==1.2.2
- fastprogress==1.0.3
- filelock==3.18.0
- fonttools==4.56.0
- h5netcdf==1.6.1
- h5py==3.13.0
- iniconfig==2.1.0
- kiwisolver==1.4.8
- logical-unification==0.4.6
- matplotlib==3.10.1
- minikanren==1.0.3
- multipledispatch==1.0.0
- numpy==2.2.4
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- pluggy==1.5.0
- pymc==4.0.0b4
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.15.2
- six==1.17.0
- tomli==2.2.1
- toolz==1.0.0
- typing-extensions==4.13.0
- tzdata==2025.2
- xarray==2025.3.1
- xarray-einstats==0.8.0
prefix: /opt/conda/envs/pymc
| [
"pymc/tests/test_logprob.py::test_unexpected_rvs"
] | [
"pymc/tests/test_logprob.py::test_joint_logpt_incsubtensor[indices0-5]",
"pymc/tests/test_logprob.py::test_joint_logpt_incsubtensor[indices1-5]",
"pymc/tests/test_logprob.py::test_joint_logpt_incsubtensor[indices2-5]",
"pymc/tests/test_logprob.py::test_joint_logpt_incsubtensor[indices3-size3]",
"pymc/tests/test_logprob.py::test_joint_logpt_subtensor",
"pymc/tests/test_logprob.py::test_logp_helper",
"pymc/tests/test_logprob.py::test_logcdf_helper",
"pymc/tests/test_logprob.py::test_logcdf_transformed_argument",
"pymc/tests/test_parallel_sampling.py::test_context",
"pymc/tests/test_parallel_sampling.py::test_bad_unpickle",
"pymc/tests/test_parallel_sampling.py::test_remote_pipe_closed",
"pymc/tests/test_parallel_sampling.py::test_explicit_sample[spawn]",
"pymc/tests/test_parallel_sampling.py::test_explicit_sample[fork]",
"pymc/tests/test_parallel_sampling.py::test_iterator",
"pymc/tests/test_parallel_sampling.py::test_spawn_densitydist_function",
"pymc/tests/test_parallel_sampling.py::test_spawn_densitydist_bound_method"
] | [
"pymc/tests/test_logprob.py::test_joint_logpt_basic",
"pymc/tests/test_logprob.py::test_model_unchanged_logprob_access"
] | [] | Apache License 2.0 | 12,438 | 442 | [
"pymc/distributions/logprob.py"
] |
M0r13n__pyais-56 | cc47fb1bf56eaabceb1657770d986a3172bac19a | 2022-03-19 14:33:46 | e833f46cbaa157aa022113c10a5630c002104459 | diff --git a/pyais/messages.py b/pyais/messages.py
index 07478b9..3cdd34d 100644
--- a/pyais/messages.py
+++ b/pyais/messages.py
@@ -563,7 +563,7 @@ class MessageType4(Payload):
lat = bit_field(27, int, from_converter=from_lat_lon, to_converter=to_lat_lon, signed=True, default=0)
epfd = bit_field(4, int, default=0, from_converter=EpfdType.from_value, to_converter=EpfdType.from_value)
spare = bit_field(10, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
radio = bit_field(19, int, default=0)
@@ -669,7 +669,7 @@ class MessageType9(Payload):
dte = bit_field(1, int, default=0)
spare = bit_field(3, int, default=0)
assigned = bit_field(1, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
radio = bit_field(20, int, default=0)
@@ -1297,7 +1297,7 @@ class MessageType27(Payload):
mmsi = bit_field(30, int, from_converter=from_mmsi, to_converter=to_mmsi)
accuracy = bit_field(1, int, default=0)
- raim = bit_field(1, int, default=0)
+ raim = bit_field(1, bool, default=0)
status = bit_field(4, int, default=0, from_converter=NavigationStatus, to_converter=NavigationStatus)
lon = bit_field(18, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
lat = bit_field(17, int, from_converter=from_lat_lon_600, to_converter=to_lat_lon_600, default=0)
| RAIM field integer in message type 4, otherwise boolean?
I've found that in messages of type 4 the RAIM flag is decoded as an integer, but for other message types it is a boolean. I'm not sure if this is a bug or by design? | M0r13n/pyais | diff --git a/tests/test_decode.py b/tests/test_decode.py
index 8442ca3..c1ac917 100644
--- a/tests/test_decode.py
+++ b/tests/test_decode.py
@@ -101,6 +101,7 @@ class TestAIS(unittest.TestCase):
assert msg['second'] == 34
assert msg['maneuver'] == ManeuverIndicator.NotAvailable
assert msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_1_c(self):
msg = decode(b"!AIVDM,1,1,,B,181:Kjh01ewHFRPDK1s3IRcn06sd,0*08").asdict()
@@ -226,6 +227,7 @@ class TestAIS(unittest.TestCase):
assert msg['dte'] == 1
assert msg['radio'] == 33392
assert not msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_10_a(self):
msg = decode(b"!AIVDM,1,1,,B,:5MlU41GMK6@,0*6C").asdict()
@@ -360,6 +362,7 @@ class TestAIS(unittest.TestCase):
assert msg['msg22'] == 1
assert not msg['assigned']
assert not msg['raim']
+ assert isinstance(msg['raim'], bool)
def test_msg_type_19(self):
msg = decode(b"!AIVDM,1,1,,B,C5N3SRgPEnJGEBT>NhWAwwo862PaLELTBJ:V00000000S0D:R220,0*0B").asdict()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 2.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y python3-dev"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
bitarray==3.3.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
coverage==7.8.0
cryptography==44.0.2
docutils==0.21.2
exceptiongroup==1.2.2
flake8==7.2.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
more-itertools==10.6.0
mypy==1.15.0
mypy-extensions==1.0.0
nh3==0.2.21
nose==1.3.7
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/M0r13n/pyais.git@cc47fb1bf56eaabceb1657770d986a3172bac19a#egg=pyais
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.2
Pygments==2.19.1
pytest==8.3.5
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
zipp==3.21.0
| name: pyais
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- bitarray==3.3.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- coverage==7.8.0
- cryptography==44.0.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.2
- pygments==2.19.1
- pytest==8.3.5
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/pyais
| [
"tests/test_decode.py::TestAIS::test_msg_type_9"
] | [] | [
"tests/test_decode.py::TestAIS::test_broken_messages",
"tests/test_decode.py::TestAIS::test_byte_stream",
"tests/test_decode.py::TestAIS::test_decode_and_merge",
"tests/test_decode.py::TestAIS::test_decode_out_of_order",
"tests/test_decode.py::TestAIS::test_decode_pos_1_2_3",
"tests/test_decode.py::TestAIS::test_empty_channel",
"tests/test_decode.py::TestAIS::test_issue_46_a",
"tests/test_decode.py::TestAIS::test_issue_46_b",
"tests/test_decode.py::TestAIS::test_issue_50",
"tests/test_decode.py::TestAIS::test_msg_too_short_enum_is_none",
"tests/test_decode.py::TestAIS::test_msg_type_10_a",
"tests/test_decode.py::TestAIS::test_msg_type_10_b",
"tests/test_decode.py::TestAIS::test_msg_type_11",
"tests/test_decode.py::TestAIS::test_msg_type_12_a",
"tests/test_decode.py::TestAIS::test_msg_type_12_b",
"tests/test_decode.py::TestAIS::test_msg_type_13",
"tests/test_decode.py::TestAIS::test_msg_type_14",
"tests/test_decode.py::TestAIS::test_msg_type_15_a",
"tests/test_decode.py::TestAIS::test_msg_type_15_b",
"tests/test_decode.py::TestAIS::test_msg_type_16",
"tests/test_decode.py::TestAIS::test_msg_type_17_a",
"tests/test_decode.py::TestAIS::test_msg_type_17_b",
"tests/test_decode.py::TestAIS::test_msg_type_18",
"tests/test_decode.py::TestAIS::test_msg_type_19",
"tests/test_decode.py::TestAIS::test_msg_type_1_a",
"tests/test_decode.py::TestAIS::test_msg_type_1_b",
"tests/test_decode.py::TestAIS::test_msg_type_1_c",
"tests/test_decode.py::TestAIS::test_msg_type_20",
"tests/test_decode.py::TestAIS::test_msg_type_21",
"tests/test_decode.py::TestAIS::test_msg_type_22_addressed",
"tests/test_decode.py::TestAIS::test_msg_type_22_broadcast",
"tests/test_decode.py::TestAIS::test_msg_type_23",
"tests/test_decode.py::TestAIS::test_msg_type_24",
"tests/test_decode.py::TestAIS::test_msg_type_25_a",
"tests/test_decode.py::TestAIS::test_msg_type_25_b",
"tests/test_decode.py::TestAIS::test_msg_type_25_c",
"tests/test_decode.py::TestAIS::test_msg_type_26_a",
"tests/test_decode.py::TestAIS::test_msg_type_26_b",
"tests/test_decode.py::TestAIS::test_msg_type_27",
"tests/test_decode.py::TestAIS::test_msg_type_3",
"tests/test_decode.py::TestAIS::test_msg_type_4_a",
"tests/test_decode.py::TestAIS::test_msg_type_4_b",
"tests/test_decode.py::TestAIS::test_msg_type_5",
"tests/test_decode.py::TestAIS::test_msg_type_6",
"tests/test_decode.py::TestAIS::test_msg_type_7",
"tests/test_decode.py::TestAIS::test_msg_type_8",
"tests/test_decode.py::TestAIS::test_msg_with_more_that_82_chars_payload",
"tests/test_decode.py::TestAIS::test_multiline_message",
"tests/test_decode.py::TestAIS::test_nmea_decode",
"tests/test_decode.py::TestAIS::test_nmea_decode_unknown_msg",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_creation",
"tests/test_decode.py::TestAIS::test_none_value_converter_for_decoding",
"tests/test_decode.py::TestAIS::test_none_values_converter_for_all_messages",
"tests/test_decode.py::TestAIS::test_to_dict_non_enum",
"tests/test_decode.py::TestAIS::test_to_json",
"tests/test_decode.py::TestAIS::test_type_22_very_short",
"tests/test_decode.py::TestAIS::test_type_25_very_short",
"tests/test_decode.py::TestAIS::test_type_26_very_short"
] | [] | MIT License | 12,449 | 510 | [
"pyais/messages.py"
] |
|
ppizarror__pygame-menu-395 | ea51d1fe26fe8ff9977414a8dc6d0b80ff06f05e | 2022-03-20 23:02:35 | 7c281e9d4a8de59b2d53e4d8734b0d231967b8be | diff --git a/pygame_menu/_decorator.py b/pygame_menu/_decorator.py
index 62992743..3e57b65e 100644
--- a/pygame_menu/_decorator.py
+++ b/pygame_menu/_decorator.py
@@ -21,7 +21,7 @@ import pygame.gfxdraw as gfxdraw
from pygame_menu._base import Base
from pygame_menu.font import FontType
from pygame_menu.utils import assert_list_vector, assert_color, make_surface, \
- is_callable, assert_vector, uuid4, warn
+ assert_vector, uuid4, warn
from pygame_menu._types import List, Tuple2NumberType, ColorInputType, Tuple, \
Any, Dict, Union, NumberType, Tuple2IntType, Optional, Callable, NumberInstance, \
@@ -658,7 +658,7 @@ class Decorator(Base):
:param pass_args: If ``False`` function is called without (surface, object) as args
:return: ID of the decoration
"""
- assert is_callable(fun), 'fun must be a callable type'
+ assert callable(fun), 'fun must be a callable type'
assert isinstance(pass_args, bool)
if pass_args:
return self._add_decor(DECORATION_CALLABLE, prev, fun)
diff --git a/pygame_menu/menu.py b/pygame_menu/menu.py
index 47a9480e..47d7b85e 100644
--- a/pygame_menu/menu.py
+++ b/pygame_menu/menu.py
@@ -27,7 +27,7 @@ from pygame_menu.locals import ALIGN_CENTER, ALIGN_LEFT, ALIGN_RIGHT, \
from pygame_menu._scrollarea import ScrollArea, get_scrollbars_from_position
from pygame_menu.sound import Sound
from pygame_menu.themes import Theme, THEME_DEFAULT
-from pygame_menu.utils import is_callable, assert_vector, make_surface, warn, \
+from pygame_menu.utils import assert_vector, make_surface, warn, \
check_key_pressed_valid, mouse_motion_current_mouse_position, get_finger_pos, \
print_menu_widget_structure
from pygame_menu.widgets import Frame, Widget, MenuBar
@@ -779,7 +779,7 @@ class Menu(Base):
:param onbeforeopen: Onbeforeopen callback, it can be a function or None
:return: Self reference
"""
- assert is_callable(onbeforeopen) or onbeforeopen is None, \
+ assert callable(onbeforeopen) or onbeforeopen is None, \
'onbeforeopen must be callable (function-type) or None'
self._onbeforeopen = onbeforeopen
return self
@@ -806,7 +806,7 @@ class Menu(Base):
:param onupdate: Onupdate callback, it can be a function or None
:return: Self reference
"""
- assert is_callable(onupdate) or onupdate is None, \
+ assert callable(onupdate) or onupdate is None, \
'onupdate must be a callable (function-type) or None'
self._onupdate = onupdate
return self
@@ -832,7 +832,7 @@ class Menu(Base):
:param onclose: Onclose callback, it can be a function, a pygame-menu event, or None
:return: Self reference
"""
- assert is_callable(onclose) or _events.is_event(onclose) or onclose is None, \
+ assert callable(onclose) or _events.is_event(onclose) or onclose is None, \
'onclose must be a MenuAction (event), callable (function-type), or None'
if onclose == _events.NONE:
onclose = None
@@ -860,7 +860,7 @@ class Menu(Base):
:param onreset: Onreset callback, it can be a function or None
:return: Self reference
"""
- assert is_callable(onreset) or onreset is None, \
+ assert callable(onreset) or onreset is None, \
'onreset must be a callable (function-type) or None'
self._onreset = onreset
return self
@@ -882,7 +882,7 @@ class Menu(Base):
:return: Self reference
"""
if onwindowmouseover is not None:
- assert is_callable(onwindowmouseover), \
+ assert callable(onwindowmouseover), \
'onwindowmouseover must be callable (function-type) or None'
self._onwindowmouseover = onwindowmouseover
return self
@@ -904,7 +904,7 @@ class Menu(Base):
:return: Self reference
"""
if onwindowmouseleave is not None:
- assert is_callable(onwindowmouseleave), \
+ assert callable(onwindowmouseleave), \
'onwindowmouseleave must be callable (function-type) or None'
self._onwindowmouseleave = onwindowmouseleave
return self
@@ -926,7 +926,7 @@ class Menu(Base):
:return: Self reference
"""
if onwidgetchange is not None:
- assert is_callable(onwidgetchange), \
+ assert callable(onwidgetchange), \
'onwidgetchange must be callable (function-type) or None'
self._onwidgetchange = onwidgetchange
return self
@@ -948,7 +948,7 @@ class Menu(Base):
:return: Self reference
"""
if onmouseover is not None:
- assert is_callable(onmouseover), \
+ assert callable(onmouseover), \
'onmouseover must be callable (function-type) or None'
self._onmouseover = onmouseover
return self
@@ -970,7 +970,7 @@ class Menu(Base):
:return: Self reference
"""
if onmouseleave is not None:
- assert is_callable(onmouseleave), \
+ assert callable(onmouseleave), \
'onmouseleave must be callable (function-type) or None'
self._onmouseleave = onmouseleave
return self
@@ -1676,7 +1676,7 @@ class Menu(Base):
self.full_reset()
# If action is callable (function)
- elif is_callable(onclose):
+ elif callable(onclose):
try:
onclose(self)
except TypeError:
@@ -2867,7 +2867,7 @@ class Menu(Base):
# Check background function
bgfun_accept_menu = False
if bgfun:
- assert is_callable(bgfun), \
+ assert callable(bgfun), \
'background function must be callable (function-type) object'
try:
bgfun(self._current)
diff --git a/pygame_menu/themes.py b/pygame_menu/themes.py
index 9da90097..0a78e044 100644
--- a/pygame_menu/themes.py
+++ b/pygame_menu/themes.py
@@ -32,7 +32,7 @@ from pygame_menu.locals import POSITION_NORTHWEST, POSITION_SOUTHEAST, ALIGN_CEN
CURSOR_ARROW
from pygame_menu._scrollarea import get_scrollbars_from_position
from pygame_menu.utils import assert_alignment, assert_cursor, assert_vector, \
- assert_position, assert_color, is_callable, format_color, assert_position_vector
+ assert_position, assert_color, format_color, assert_position_vector
from pygame_menu.widgets import HighlightSelection, NoneSelection, MENUBAR_STYLE_ADAPTIVE, \
MENUBAR_STYLE_SIMPLE, MENUBAR_STYLE_TITLE_ONLY, MENUBAR_STYLE_TITLE_ONLY_DIAGONAL, \
MENUBAR_STYLE_NONE, MENUBAR_STYLE_UNDERLINE, MENUBAR_STYLE_UNDERLINE_TITLE
@@ -721,7 +721,7 @@ class Theme(object):
assert_alignment(value)
elif val_type == callable or val_type == 'function' or val_type == 'callable':
- assert is_callable(value), \
+ assert callable(value), \
'value must be callable type'
elif val_type == 'color':
diff --git a/pygame_menu/utils.py b/pygame_menu/utils.py
index 153d7eec..04bed641 100644
--- a/pygame_menu/utils.py
+++ b/pygame_menu/utils.py
@@ -22,7 +22,6 @@ __all__ = [
'format_color',
'get_cursor',
'get_finger_pos',
- 'is_callable',
'load_pygame_image_file',
'make_surface',
'mouse_motion_current_mouse_position',
@@ -42,10 +41,8 @@ __all__ = [
]
-import functools
import sys
import traceback
-import types
import uuid
import warnings
@@ -359,9 +356,10 @@ def is_callable(func: Any) -> bool:
:param func: Function object
:return: ``True`` if function
"""
- # noinspection PyTypeChecker
- return isinstance(func, (types.FunctionType, types.BuiltinFunctionType,
- types.MethodType, functools.partial))
+ e = 'is_callable(func) method will be removed in v5, consider using built-in' \
+ ' callable(func) method instead'
+ warnings.warn(e, DeprecationWarning)
+ return callable(func)
def load_pygame_image_file(image_path: str, **kwargs) -> 'pygame.Surface':
diff --git a/pygame_menu/version.py b/pygame_menu/version.py
index d21d275f..4ac92dfe 100644
--- a/pygame_menu/version.py
+++ b/pygame_menu/version.py
@@ -34,6 +34,6 @@ class Version(tuple):
patch = property(lambda self: self[2])
-vernum = Version(4, 2, 6)
+vernum = Version(4, 2, 7)
ver = str(vernum)
rev = ''
diff --git a/pygame_menu/widgets/core/widget.py b/pygame_menu/widgets/core/widget.py
index d74681de..6aba29c4 100644
--- a/pygame_menu/widgets/core/widget.py
+++ b/pygame_menu/widgets/core/widget.py
@@ -45,7 +45,7 @@ from pygame_menu.locals import POSITION_NORTHWEST, POSITION_SOUTHWEST, POSITION_
POSITION_SOUTHEAST, ALIGN_CENTER
from pygame_menu.sound import Sound
from pygame_menu.utils import make_surface, assert_alignment, assert_color, \
- assert_position, assert_vector, is_callable, parse_padding, uuid4, \
+ assert_position, assert_vector, parse_padding, uuid4, \
mouse_motion_current_mouse_position, PYGAME_V2, set_pygame_cursor, warn, \
get_cursor, ShadowGenerator
from pygame_menu.widgets.core.selection import Selection
@@ -484,7 +484,7 @@ class Widget(Base):
:return: Self reference
"""
if onchange:
- assert is_callable(onchange), \
+ assert callable(onchange), \
'onchange must be callable (function-type) or None'
self._onchange = onchange
return self
@@ -503,7 +503,7 @@ class Widget(Base):
:return: Self reference
"""
if onreturn:
- assert is_callable(onreturn), \
+ assert callable(onreturn), \
'onreturn must be callable (function-type) or None'
self._onreturn = onreturn
return self
@@ -522,7 +522,7 @@ class Widget(Base):
:return: Self reference
"""
if onselect:
- assert is_callable(onselect), \
+ assert callable(onselect), \
'onselect must be callable (function-type) or None'
self._onselect = onselect
return self
@@ -541,7 +541,7 @@ class Widget(Base):
:return: Self reference
"""
if onmouseover:
- assert is_callable(onmouseover), \
+ assert callable(onmouseover), \
'onmouseover must be callable (function-type) or None'
self._onmouseover = onmouseover
return self
@@ -560,7 +560,7 @@ class Widget(Base):
:return: Self reference
"""
if onmouseleave:
- assert is_callable(onmouseleave), \
+ assert callable(onmouseleave), \
'onmouseleave must be callable (function-type) or None'
self._onmouseleave = onmouseleave
return self
@@ -2777,7 +2777,7 @@ class Widget(Base):
:param draw_callback: Function
:return: Callback ID
"""
- assert is_callable(draw_callback), \
+ assert callable(draw_callback), \
'draw callback must be callable (function-type)'
callback_id = uuid4()
self._draw_callbacks[callback_id] = draw_callback
@@ -2832,7 +2832,7 @@ class Widget(Base):
:param update_callback: Function
:return: Callback ID
"""
- assert is_callable(update_callback), \
+ assert callable(update_callback), \
'update callback must be callable (function-type)'
callback_id = uuid4()
self._update_callbacks[callback_id] = update_callback
diff --git a/pygame_menu/widgets/widget/button.py b/pygame_menu/widgets/widget/button.py
index 7d4a3b43..12b884fc 100644
--- a/pygame_menu/widgets/widget/button.py
+++ b/pygame_menu/widgets/widget/button.py
@@ -20,7 +20,7 @@ import webbrowser
from abc import ABC
from pygame_menu.locals import FINGERUP, CURSOR_HAND
-from pygame_menu.utils import is_callable, assert_color, get_finger_pos, warn
+from pygame_menu.utils import assert_color, get_finger_pos, warn
from pygame_menu.widgets.core.widget import AbstractWidgetManager, Widget
from pygame_menu._types import Any, CallbackType, Callable, Union, List, Tuple, \
@@ -89,7 +89,7 @@ class Button(Widget):
:param callback: Callback when selecting the widget, executed in :py:meth:`pygame_menu.widgets.core.widget.Widget.set_selected`
"""
if callback is not None:
- assert is_callable(callback), \
+ assert callable(callback), \
'callback must be callable (function-type) or None'
self._onselect = callback
@@ -107,7 +107,7 @@ class Button(Widget):
:param callback: Function
:param args: Arguments used by the function once triggered
"""
- assert is_callable(callback), \
+ assert callable(callback), \
'only callable (function-type) are allowed'
# If return is a Menu object, remove it from submenus list
@@ -395,7 +395,7 @@ class ButtonManager(AbstractWidgetManager, ABC):
widget = Button(title, button_id, self._menu.full_reset)
# If element is a function or callable
- elif is_callable(action):
+ elif callable(action):
if not accept_kwargs:
widget = Button(title, button_id, action, *args)
else:
diff --git a/pygame_menu/widgets/widget/dropselect_multiple.py b/pygame_menu/widgets/widget/dropselect_multiple.py
index 95bd1a3c..9194b33b 100644
--- a/pygame_menu/widgets/widget/dropselect_multiple.py
+++ b/pygame_menu/widgets/widget/dropselect_multiple.py
@@ -28,7 +28,7 @@ import pygame_menu
from abc import ABC
from pygame_menu.font import FontType
from pygame_menu.locals import POSITION_NORTHWEST, POSITION_SOUTHEAST
-from pygame_menu.utils import assert_color, assert_vector, is_callable
+from pygame_menu.utils import assert_color, assert_vector
from pygame_menu.widgets.core.widget import AbstractWidgetManager, Widget
from pygame_menu.widgets.widget.button import Button
from pygame_menu.widgets.widget.dropselect import DropSelect
@@ -326,7 +326,7 @@ class DropSelectMultiple(DropSelect):
elif isinstance(self._selection_placeholder_format, str):
return self._placeholder_selected.format(self._selection_placeholder_format.join(list_items))
- elif is_callable(self._selection_placeholder_format):
+ elif callable(self._selection_placeholder_format):
try:
o = self._selection_placeholder_format(list_items)
except TypeError:
diff --git a/pygame_menu/widgets/widget/label.py b/pygame_menu/widgets/widget/label.py
index 40215126..ac3fe6a2 100644
--- a/pygame_menu/widgets/widget/label.py
+++ b/pygame_menu/widgets/widget/label.py
@@ -17,7 +17,7 @@ import textwrap
import time
from abc import ABC
-from pygame_menu.utils import assert_color, is_callable, warn, uuid4
+from pygame_menu.utils import assert_color, warn, uuid4
from pygame_menu.widgets.core.widget import Widget, AbstractWidgetManager
from pygame_menu._types import Any, CallbackType, List, Union, Tuple, Optional, \
@@ -112,7 +112,7 @@ class Label(Widget):
:return: Self reference
"""
if generator is not None:
- assert is_callable(generator)
+ assert callable(generator)
self._title_generator = generator
# Update update widgets
diff --git a/pygame_menu/widgets/widget/menulink.py b/pygame_menu/widgets/widget/menulink.py
index f4c74772..85c8f198 100644
--- a/pygame_menu/widgets/widget/menulink.py
+++ b/pygame_menu/widgets/widget/menulink.py
@@ -17,7 +17,6 @@ import pygame_menu
from abc import ABC
from pygame_menu.widgets.core.widget import AbstractWidgetManager
from pygame_menu.widgets.widget.none import NoneWidget
-from pygame_menu.utils import is_callable
from pygame_menu._types import Callable
@@ -45,7 +44,7 @@ class MenuLink(NoneWidget):
link_id: str = ''
) -> None:
assert isinstance(menu, pygame_menu.Menu)
- assert is_callable(menu_opener_handler), \
+ assert callable(menu_opener_handler), \
'menu opener handler must be callable (a function)'
super(MenuLink, self).__init__(
widget_id=link_id
diff --git a/pygame_menu/widgets/widget/progressbar.py b/pygame_menu/widgets/widget/progressbar.py
index 8b5f0d08..bac40518 100644
--- a/pygame_menu/widgets/widget/progressbar.py
+++ b/pygame_menu/widgets/widget/progressbar.py
@@ -24,7 +24,7 @@ from abc import ABC
from pygame_menu.font import FontType, assert_font
from pygame_menu.locals import ALIGN_LEFT, ALIGN_CENTER
from pygame_menu.utils import assert_color, assert_vector, make_surface, \
- is_callable, assert_alignment, parse_padding
+ assert_alignment, parse_padding
from pygame_menu.widgets.core.widget import Widget, WidgetTransformationNotImplemented, \
AbstractWidgetManager
@@ -150,7 +150,7 @@ class ProgressBar(Widget):
# Check progress text
assert isinstance(progress_text_enabled, bool)
- assert is_callable(progress_text_format)
+ assert callable(progress_text_format)
assert isinstance(progress_text_format(0), str)
assert isinstance(progress_text_placeholder, str)
assert_alignment(progress_text_align)
diff --git a/pygame_menu/widgets/widget/rangeslider.py b/pygame_menu/widgets/widget/rangeslider.py
index fae66c28..def64235 100644
--- a/pygame_menu/widgets/widget/rangeslider.py
+++ b/pygame_menu/widgets/widget/rangeslider.py
@@ -29,7 +29,7 @@ from pygame_menu.locals import POSITION_NORTH, POSITION_SOUTH
from pygame_menu.font import FontType, assert_font
from pygame_menu.locals import FINGERUP, FINGERDOWN, FINGERMOTION
from pygame_menu.utils import check_key_pressed_valid, assert_color, assert_vector, \
- make_surface, get_finger_pos, assert_position, parse_padding, is_callable
+ make_surface, get_finger_pos, assert_position, parse_padding
from pygame_menu.widgets.core.widget import Widget, WidgetTransformationNotImplemented, \
AbstractWidgetManager
@@ -375,7 +375,7 @@ class RangeSlider(Widget):
assert isinstance(slider_text_value_triangle, bool)
# Check the value format function
- assert is_callable(value_format)
+ assert callable(value_format)
assert isinstance(value_format(0), str), \
'value_format must be a function that accepts only 1 argument ' \
'(value) and must return a string'
| Function `is_callable` does not accept callable objects
**Describe the bug**
The `is_callable` function is supposed to check that an object is callable, for example to add it as a button action. There are two problems with it:
1. There is already a builtin to do this: `callable`.
2. The `is_callable` function is more restrictive than `callable`: it only allows functions, methods and `partial` objects. It does not allow objects with a `__call__` method, for no particular reason.
I think that unless there is a powerful reason not to, the `is_callable` function should be replaced by the `callable` builtin.
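For illustration, a minimal self-contained sketch of the difference (the `ClickHandler` class is made up; the `isinstance` tuple mirrors the old `is_callable` body shown in the patch above):

```python
import functools
import types


class ClickHandler:
    # An object with __call__, e.g. something a user might pass as a button action.
    def __call__(self) -> None:
        print('clicked')


handler = ClickHandler()

# The builtin accepts anything implementing __call__, including instances:
print(callable(handler))  # True

# A check restricted to function/method types rejects the same object:
print(isinstance(
    handler,
    (types.FunctionType, types.BuiltinFunctionType, types.MethodType, functools.partial),
))  # False
```

This is why swapping the isinstance-based check for the builtin also covers callable class instances passed as button actions.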
| ppizarror/pygame-menu | diff --git a/test/test_themes.py b/test/test_themes.py
index 5ceaf8d1..c486d4e3 100644
--- a/test/test_themes.py
+++ b/test/test_themes.py
@@ -222,7 +222,6 @@ class ThemeTest(BaseTest):
[pygame_menu.locals.POSITION_WEST, bool]
]
- self.assertRaises(AssertionError, lambda: t._get({}, '', 'callable', bool))
self.assertRaises(AssertionError, lambda: t._get({}, '', 'color', [1, 1, 1, 256]))
self.assertRaises(AssertionError, lambda: t._get({}, '', 'color', [11, 1, -1]))
self.assertRaises(AssertionError, lambda: t._get({}, '', 'color', [11, 1, -1]))
diff --git a/test/test_utils.py b/test/test_utils.py
index c81bf283..84fca73f 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -18,6 +18,13 @@ import pygame_menu.utils as ut
class UtilsTest(BaseTest):
+ def test_callable(self) -> None:
+ """
+ Test is callable.
+ """
+ self.assertTrue(ut.is_callable(bool))
+ self.assertFalse(ut.is_callable(1))
+
def test_position_str(self) -> None:
"""
Test position assert values as str.
diff --git a/test/test_widget_button.py b/test/test_widget_button.py
index 3ae5116f..af6421a1 100644
--- a/test/test_widget_button.py
+++ b/test/test_widget_button.py
@@ -35,8 +35,6 @@ class ButtonWidgetTest(BaseTest):
# Invalid ones
invalid = [
- bool, # type
- object, # object
1, # int
'a', # str
True, # bool
@@ -66,7 +64,7 @@ class ButtonWidgetTest(BaseTest):
self.assertTrue(menu.add.button('b1', v) is not None)
btn = menu.add.button('b1', menu2)
- for v in [menu, 1, bool, object, [1, 2, 3], (1, 2, 3)]:
+ for v in [menu, 1, [1, 2, 3], (1, 2, 3)]:
self.assertRaises(AssertionError, lambda: btn.update_callback(v))
btn.update_callback(test)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 12
} | 4.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
pygame==2.6.1
-e git+https://github.com/ppizarror/pygame-menu.git@ea51d1fe26fe8ff9977414a8dc6d0b80ff06f05e#egg=pygame_menu
pyperclip==1.9.0
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: pygame-menu
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pygame==2.6.1
- pyperclip==1.9.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/pygame-menu
| [
"test/test_utils.py::UtilsTest::test_callable"
] | [] | [
"test/test_themes.py::ThemeTest::test_copy",
"test/test_themes.py::ThemeTest::test_format_opacity",
"test/test_themes.py::ThemeTest::test_get",
"test/test_themes.py::ThemeTest::test_invalid_kwargs",
"test/test_themes.py::ThemeTest::test_methods",
"test/test_themes.py::ThemeTest::test_str_int_color",
"test/test_themes.py::ThemeTest::test_to_tuple",
"test/test_themes.py::ThemeTest::test_validation",
"test/test_utils.py::UtilsTest::test_padding",
"test/test_utils.py::UtilsTest::test_position_str",
"test/test_utils.py::UtilsTest::test_shadows",
"test/test_utils.py::UtilsTest::test_terminal_widget_title",
"test/test_widget_button.py::ButtonWidgetTest::test_button",
"test/test_widget_button.py::ButtonWidgetTest::test_empty_title",
"test/test_widget_button.py::ButtonWidgetTest::test_shadow",
"test/test_widget_button.py::ButtonWidgetTest::test_value"
] | [] | MIT License | 12,459 | 4,617 | [
"pygame_menu/_decorator.py",
"pygame_menu/menu.py",
"pygame_menu/themes.py",
"pygame_menu/utils.py",
"pygame_menu/version.py",
"pygame_menu/widgets/core/widget.py",
"pygame_menu/widgets/widget/button.py",
"pygame_menu/widgets/widget/dropselect_multiple.py",
"pygame_menu/widgets/widget/label.py",
"pygame_menu/widgets/widget/menulink.py",
"pygame_menu/widgets/widget/progressbar.py",
"pygame_menu/widgets/widget/rangeslider.py"
] |
|
googleapis__python-pubsub-622 | 8f02850168fc9d7dee57f3b38f31d23d77c4aa27 | 2022-03-22 02:53:02 | 8f02850168fc9d7dee57f3b38f31d23d77c4aa27 | diff --git a/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py b/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py
index e098491..4d9097f 100644
--- a/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py
+++ b/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py
@@ -20,7 +20,7 @@ import itertools
import logging
import threading
import typing
-from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
+from typing import Any, Dict, Callable, Iterable, List, Optional, Tuple, Union
import uuid
import grpc # type: ignore
@@ -49,7 +49,6 @@ from google.rpc import status_pb2
if typing.TYPE_CHECKING: # pragma: NO COVER
from google.cloud.pubsub_v1 import subscriber
- from google.protobuf.internal import containers
_LOGGER = logging.getLogger(__name__)
@@ -141,9 +140,7 @@ def _get_status(exc: exceptions.GoogleAPICallError,) -> Optional["status_pb2.Sta
return None
-def _get_ack_errors(
- exc: exceptions.GoogleAPICallError,
-) -> Optional["containers.ScalarMap"]:
+def _get_ack_errors(exc: exceptions.GoogleAPICallError,) -> Optional[Dict[str, str]]:
status = _get_status(exc)
if not status:
_LOGGER.debug("Unable to get status of errored RPC.")
@@ -159,8 +156,8 @@ def _get_ack_errors(
def _process_requests(
error_status: Optional["status_pb2.Status"],
- ack_reqs_dict: "containers.ScalarMap",
- errors_dict: Optional["containers.ScalarMap"],
+ ack_reqs_dict: Dict[str, requests.AckRequest],
+ errors_dict: Optional[Dict[str, str]],
):
"""Process requests by referring to error_status and errors_dict.
@@ -182,9 +179,9 @@ def _process_requests(
exc = AcknowledgeError(AcknowledgeStatus.INVALID_ACK_ID, info=None)
else:
exc = AcknowledgeError(AcknowledgeStatus.OTHER, exactly_once_error)
-
future = ack_reqs_dict[ack_id].future
- future.set_exception(exc)
+ if future is not None:
+ future.set_exception(exc)
requests_completed.append(ack_reqs_dict[ack_id])
# Temporary GRPC errors are retried
elif (
@@ -201,12 +198,14 @@ def _process_requests(
else:
exc = AcknowledgeError(AcknowledgeStatus.OTHER, str(error_status))
future = ack_reqs_dict[ack_id].future
- future.set_exception(exc)
+ if future is not None:
+ future.set_exception(exc)
requests_completed.append(ack_reqs_dict[ack_id])
# Since no error occurred, requests with futures are completed successfully.
elif ack_reqs_dict[ack_id].future:
future = ack_reqs_dict[ack_id].future
# success
+ assert future is not None
future.set_result(AcknowledgeStatus.SUCCESS)
requests_completed.append(ack_reqs_dict[ack_id])
# All other requests are considered completed.
| mypy failures on streaming_pull_manager
#### Steps to reproduce
nox > python -m pip install -e '.[all]'
nox > python -m pip install mypy==0.910
nox > python -m pip install types-protobuf types-setuptools
nox > python -m pip install 'google-api-core[grpc]>=2.1.1' 'google-cloud-core>=2.2.0'
nox > mypy google/cloud
#### Stack trace
```
google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py:178: error: _ScalarV? has no attribute "startswith"
google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py:186: error: _ScalarV? has no attribute "future"
google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py:203: error: _ScalarV? has no attribute "future"
google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py:207: error: _ScalarV? has no attribute "future"
google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py:208: error: _ScalarV? has no attribute "future"
```
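A minimal sketch of what these errors point at; the names `AckRequest` and `first_future` below are stand-ins, not the library's API. A concrete `typing.Dict` value type lets mypy check attribute access, while protobuf's internal `ScalarMap` leaves the value type as an opaque `_ScalarV`:

```python
from typing import Dict, Optional


class AckRequest:
    # Stand-in with just the attribute the real code accesses.
    def __init__(self, future: Optional[object] = None) -> None:
        self.future = future


def first_future(ack_reqs: Dict[str, AckRequest]) -> Optional[object]:
    # With Dict[str, AckRequest], mypy knows each value is an AckRequest,
    # so `.future` type-checks; under a ScalarMap annotation the value type
    # stays an unresolved _ScalarV and the same access is rejected.
    for req in ack_reqs.values():
        return req.future
    return None
```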
| googleapis/python-pubsub | diff --git a/tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py b/tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py
index 36f82b6..e9554de 100644
--- a/tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py
+++ b/tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py
@@ -1713,6 +1713,21 @@ def test_process_requests_no_errors():
assert not requests_to_retry
+def test_process_requests_no_errors_no_future():
+ # no errors, request should be completed, even when future is None.
+ ack_reqs_dict = {
+ "ackid1": requests.AckRequest(
+ ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=None
+ )
+ }
+ errors_dict = {}
+ requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
+ None, ack_reqs_dict, errors_dict
+ )
+ assert requests_completed[0].ack_id == "ackid1"
+ assert not requests_to_retry
+
+
def test_process_requests_permanent_error_raises_exception():
# a permanent error raises an exception
future = futures.Future()
@@ -1735,6 +1750,40 @@ def test_process_requests_permanent_error_raises_exception():
assert not requests_to_retry
+def test_process_requests_permanent_error_other_raises_exception():
+ # a permanent error of other raises an exception
+ future = futures.Future()
+ ack_reqs_dict = {
+ "ackid1": requests.AckRequest(
+ ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=future
+ )
+ }
+ errors_dict = {"ackid1": "PERMANENT_FAILURE_OTHER"}
+ requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
+ None, ack_reqs_dict, errors_dict
+ )
+ assert requests_completed[0].ack_id == "ackid1"
+ with pytest.raises(subscriber_exceptions.AcknowledgeError) as exc_info:
+ future.result()
+ assert exc_info.value.error_code == subscriber_exceptions.AcknowledgeStatus.OTHER
+ assert not requests_to_retry
+
+
+def test_process_requests_permanent_error_other_raises_exception_no_future():
+ # with a permanent error, request is completed even when future is None.
+ ack_reqs_dict = {
+ "ackid1": requests.AckRequest(
+ ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=None
+ )
+ }
+ errors_dict = {"ackid1": "PERMANENT_FAILURE_OTHER"}
+ requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
+ None, ack_reqs_dict, errors_dict
+ )
+ assert requests_completed[0].ack_id == "ackid1"
+ assert not requests_to_retry
+
+
def test_process_requests_transient_error_returns_request_for_retrying():
# a transient error returns the request in `requests_to_retry`
future = futures.Future()
@@ -1872,6 +1921,23 @@ def test_process_requests_other_error_status_raises_exception():
assert not requests_to_retry
+def test_process_requests_other_error_status_raises_exception_no_future():
+ # with an unrecognized error status, requests are completed, even when
+ # future is None.
+ ack_reqs_dict = {
+ "ackid1": requests.AckRequest(
+ ack_id="ackid1", byte_size=0, time_to_ack=20, ordering_key="", future=None
+ )
+ }
+ st = status_pb2.Status()
+ st.code = code_pb2.Code.OUT_OF_RANGE
+ requests_completed, requests_to_retry = streaming_pull_manager._process_requests(
+ st, ack_reqs_dict, None
+ )
+ assert requests_completed[0].ack_id == "ackid1"
+ assert not requests_to_retry
+
+
def test_process_requests_mixed_success_and_failure_acks():
# mixed success and failure (acks)
future1 = futures.Future()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 2.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
google-api-core==2.24.2
google-auth==2.38.0
-e git+https://github.com/googleapis/python-pubsub.git@8f02850168fc9d7dee57f3b38f31d23d77c4aa27#egg=google_cloud_pubsub
googleapis-common-protos==1.69.2
grpc-google-iam-v1==0.12.7
grpcio==1.71.0
grpcio-status==1.63.0rc1
idna==3.10
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
proto-plus==1.26.1
protobuf==4.25.6
pyasn1==0.6.1
pyasn1_modules==0.4.2
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
requests==2.32.3
rsa==4.9
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: python-pubsub
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- google-api-core==2.24.2
- google-auth==2.38.0
- googleapis-common-protos==1.69.2
- grpc-google-iam-v1==0.12.7
- grpcio==1.71.0
- grpcio-status==1.63.0rc1
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- proto-plus==1.26.1
- protobuf==4.25.6
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- requests==2.32.3
- rsa==4.9
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/python-pubsub
| [
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_permanent_error_other_raises_exception_no_future",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_other_error_status_raises_exception_no_future"
] | [] | [
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_as_exception[exception0-ValueError]",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_as_exception[exception1-GoogleAPICallError]",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_as_exception[exception2-Exception]",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_as_exception[something",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_callback_errors_no_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__wrap_callback_errors_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_constructor_and_default_state",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_constructor_with_options",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_no_custom_flow_control_setting",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_max_duration_per_lease_extension",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_min_duration_per_lease_extension",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_max_duration_per_lease_extension_too_low",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_min_duration_per_lease_extension_too_high",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_exactly_once_enabled",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_with_min_duration_per_lease_extension_with_exactly_once_enabled",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__obtain_ack_deadline_no_value_update",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_client_id",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_streaming_flow_control",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_streaming_flow_control_use_legacy_flow_control",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_maybe_pause_consumer_wo_consumer_set",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_lease_load_and_pause",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_drop_and_resume",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_resume_not_paused",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_maybe_resume_consumer_wo_consumer_set",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__maybe_release_messages_on_overload",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__maybe_release_messages_below_overload",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__maybe_release_messages_negative_on_hold_bytes_warning",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_ack",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_modack",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_ack_api_call_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_modack_api_call_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_ack_retry_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_send_unary_modack_retry_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_heartbeat",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_heartbeat_inactive",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_heartbeat_stream_ack_deadline_seconds",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_open",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_open_already_active",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_open_has_been_closed",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_inactive_consumer",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_idempotent",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_no_dispatcher_error",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_callbacks",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_blocking_scheduler_shutdown",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_nonblocking_scheduler_shutdown",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_close_nacks_internally_queued_messages",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__get_initial_request",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__get_initial_request_wo_leaser",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_delivery_attempt",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_modifies_ack_deadline",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_modifies_ack_deadline_with_exactly_once_min_lease",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_send_ack_deadline_after_enabling_exactly_once",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_no_leaser_overload",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_with_leaser_overload",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_none_data",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_with_ordering_keys",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_enable_exactly_once",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_disable_exactly_once",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_response_exactly_once_immediate_modacks_fail",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__should_recover_true",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__should_recover_false",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__should_terminate_true",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__should_terminate_false",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test__on_rpc_done",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_activate_ordering_keys",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_activate_ordering_keys_stopped_scheduler",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_unable_to_unpack",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_no_response_obj",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_from_call_returned_none",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_value_error_thrown",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_no_error_details",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_detail_not_error_info",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_get_ack_errors_happy_case",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_no_requests",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_error_dict_is_none",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_no_errors_has_no_future",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_no_errors",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_no_errors_no_future",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_permanent_error_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_permanent_error_other_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_transient_error_returns_request_for_retrying",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_unknown_error_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_retriable_error_status_returns_request_for_retrying",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_permission_denied_error_status_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_failed_precondition_error_status_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_other_error_status_raises_exception",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_mixed_success_and_failure_acks",
"tests/unit/pubsub_v1/subscriber/test_streaming_pull_manager.py::test_process_requests_mixed_success_and_failure_modacks"
] | [] | Apache License 2.0 | 12,468 | 741 | [
"google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py"
] |
|
iris-hep__func_adl-100 | a8b3e1ff5e397f4dcd156e255bd3f26e819f53d0 | 2022-03-22 20:20:29 | a8b3e1ff5e397f4dcd156e255bd3f26e819f53d0 | diff --git a/func_adl/util_ast.py b/func_adl/util_ast.py
index 868029d..7b65cef 100644
--- a/func_adl/util_ast.py
+++ b/func_adl/util_ast.py
@@ -322,14 +322,28 @@ class _rewrite_captured_vars(ast.NodeTransformer):
def __init__(self, cv: inspect.ClosureVars):
self._lookup_dict: Dict[str, Any] = dict(cv.nonlocals)
self._lookup_dict.update(cv.globals)
+ self._ignore_stack = []
def visit_Name(self, node: ast.Name) -> Any:
+ if self.is_arg(node.id):
+ return node
+
if node.id in self._lookup_dict:
v = self._lookup_dict[node.id]
if not callable(v):
return as_literal(self._lookup_dict[node.id])
return node
+ def visit_Lambda(self, node: ast.Lambda) -> Any:
+ self._ignore_stack.append([a.arg for a in node.args.args])
+ v = super().generic_visit(node)
+ self._ignore_stack.pop()
+ return v
+
+ def is_arg(self, a_name: str) -> bool:
+ "If the arg is on the stack, then return true"
+ return any([a == a_name for frames in self._ignore_stack for a in frames])
+
def global_getclosurevars(f: Callable) -> inspect.ClosureVars:
"""Grab the closure over all passed function. Add all known global
| Lambda closure prefers global over local arg name
In the following code:
```python
sys_muon = (calib_tools.query_sys_error(ds, 'MUON_ID__1up')
.Select(lambda e: e.Muons())
.Select(lambda l_muons: [m for m in l_muons if m.pt() > 30000.0])
.Select(lambda l_muons: {
'pt': [m.pt() / 1000.0 for m in l_muons],
'eta': [m.eta() for m in l_muons],
'phi': [m.phi() for m in l_muons],
})
.AsAwkwardArray()
.value())
```
It will pick up `l_muons` if available in local variables or global space rather than the `lambda` argument. Totally violates how we think of python! | iris-hep/func_adl | diff --git a/tests/test_util_ast.py b/tests/test_util_ast.py
index e255a40..5d2284a 100644
--- a/tests/test_util_ast.py
+++ b/tests/test_util_ast.py
@@ -279,9 +279,23 @@ def test_parse_lambda_capture():
assert ast.dump(r) == ast.dump(r_true)
+def test_parse_lambda_capture_ignore_local():
+ x = 30 # NOQA type: ignore
+ r = parse_as_ast(lambda x: x > 20)
+ r_true = parse_as_ast(lambda y: y > 20)
+ assert ast.dump(r) == ast.dump(r_true).replace("'y'", "'x'")
+
+
g_cut_value = 30
+def test_parse_lambda_capture_ignore_global():
+ x = 30 # NOQA type: ignore
+ r = parse_as_ast(lambda g_cut_value: g_cut_value > 20)
+ r_true = parse_as_ast(lambda y: y > 20)
+ assert ast.dump(r) == ast.dump(r_true).replace("'y'", "'g_cut_value'")
+
+
def test_parse_lambda_capture_nested_global():
r = parse_as_ast(lambda x: (lambda y: y > g_cut_value)(x))
r_true = parse_as_ast(lambda x: (lambda y: y > 30)(x))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 3.010 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astunparse==1.6.3
backports.tarfile==1.2.0
black==25.1.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
-e git+https://github.com/iris-hep/func_adl.git@a8b3e1ff5e397f4dcd156e255bd3f26e819f53d0#egg=func_adl
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==6.0.1
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
make-it-sync==2.0.0
markdown-it-py==3.0.0
mccabe==0.7.0
mdurl==0.1.2
more-itertools==10.6.0
mypy-extensions==1.0.0
nh3==0.2.21
packaging @ file:///croot/packaging_1734472117206/work
pathspec==0.12.1
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.2
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
six==1.17.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
zipp==3.21.0
| name: func_adl
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astunparse==1.6.3
- backports-tarfile==1.2.0
- black==25.1.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- docutils==0.21.2
- flake8==7.2.0
- func-adl==0.0.1a1
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- isort==6.0.1
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- make-it-sync==2.0.0
- markdown-it-py==3.0.0
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- pathspec==0.12.1
- platformdirs==4.3.7
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.2
- pygments==2.19.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- six==1.17.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/func_adl
| [
"tests/test_util_ast.py::test_parse_lambda_capture_ignore_global"
] | [] | [
"tests/test_util_ast.py::test_as_ast_integer",
"tests/test_util_ast.py::test_as_ast_string",
"tests/test_util_ast.py::test_as_ast_string_var",
"tests/test_util_ast.py::test_as_ast_list",
"tests/test_util_ast.py::test_function_call_simple",
"tests/test_util_ast.py::test_identity_is",
"tests/test_util_ast.py::test_identity_isnot_body",
"tests/test_util_ast.py::test_identity_isnot_args",
"tests/test_util_ast.py::test_identity_isnot_body_var",
"tests/test_util_ast.py::test_lambda_test_expression",
"tests/test_util_ast.py::test_lambda_assure_expression",
"tests/test_util_ast.py::test_lambda_assure_lambda",
"tests/test_util_ast.py::test_lambda_args",
"tests/test_util_ast.py::test_lambda_simple_ast_expr",
"tests/test_util_ast.py::test_lambda_build_single_arg",
"tests/test_util_ast.py::test_lambda_build_list_arg",
"tests/test_util_ast.py::test_lambda_build_proper",
"tests/test_util_ast.py::test_call_wrap_list_arg",
"tests/test_util_ast.py::test_call_wrap_single_arg",
"tests/test_util_ast.py::test_lambda_test_lambda_module",
"tests/test_util_ast.py::test_lambda_test_raw_lambda",
"tests/test_util_ast.py::test_lambda_is_true_yes",
"tests/test_util_ast.py::test_lambda_is_true_no",
"tests/test_util_ast.py::test_lambda_is_true_expression",
"tests/test_util_ast.py::test_lambda_is_true_non_lambda",
"tests/test_util_ast.py::test_lambda_replace_simple_expression",
"tests/test_util_ast.py::test_rewrite_oneliner",
"tests/test_util_ast.py::test_rewrite_twoliner",
"tests/test_util_ast.py::test_rewrite_noret",
"tests/test_util_ast.py::test_parse_as_ast_lambda",
"tests/test_util_ast.py::test_parse_as_str",
"tests/test_util_ast.py::test_parse_as_callable_simple",
"tests/test_util_ast.py::test_parse_nested_lambda",
"tests/test_util_ast.py::test_parse_lambda_capture",
"tests/test_util_ast.py::test_parse_lambda_capture_ignore_local",
"tests/test_util_ast.py::test_parse_lambda_capture_nested_global",
"tests/test_util_ast.py::test_parse_lambda_capture_nested_local",
"tests/test_util_ast.py::test_parse_simple_func",
"tests/test_util_ast.py::test_parse_global_simple_func",
"tests/test_util_ast.py::test_parse_global_capture",
"tests/test_util_ast.py::test_unknown_function",
"tests/test_util_ast.py::test_known_local_function",
"tests/test_util_ast.py::test_known_global_function",
"tests/test_util_ast.py::test_parse_continues",
"tests/test_util_ast.py::test_parse_continues_accross_lines",
"tests/test_util_ast.py::test_decorator_parse",
"tests/test_util_ast.py::test_indent_parse",
"tests/test_util_ast.py::test_two_deep_parse",
"tests/test_util_ast.py::test_parse_continues_one_line",
"tests/test_util_ast.py::test_parse_metadata_there",
"tests/test_util_ast.py::test_realign_no_indent",
"tests/test_util_ast.py::test_realign_indent_sp",
"tests/test_util_ast.py::test_realign_indent_tab",
"tests/test_util_ast.py::test_realign_indent_2lines",
"tests/test_util_ast.py::test_realign_indent_2lines_uneven"
] | [] | MIT License | 12,476 | 350 | [
"func_adl/util_ast.py"
] |
|
pre-commit__pre-commit-2301 | 934afb85a4a14a6adea62e694e631845edfb7132 | 2022-03-23 01:55:55 | 7602abc3cfa054bcec49e982fd3a90e026430dae | lorenzwalthert: This currently does not work because we also need to account for the fact that `R_HOME` might not be set (as it seems in the test suite). In that case,`bin/Rscript` Is returned.
To be true to the name of the function, maybe we should look up if `Rscript` is on the `PATH` and return that full path or - if `Rscript` is not on the `PATH` - return `$R_HOME/bin/Rscript`, and error if `$R_HOME` is not set.
jeff-m-sullivan: Of course! Sorry I missed that. I'll put together something more thorough along those lines.
lorenzwalthert: Cool. Let me know if you need help. Also, we should rename the function `_rscript_exec` to `_path_rscript_exec()`, as it now always returns a path to the executable, and it would be great if you could also add a test to `tests/languages/r_test.py`. In the test you can ensure the same environment as at execution time is present and set `R_HOME` in a similar way.
Then, we should probably add a test along these lines.
```python
import pytest

from pre_commit.envcontext import UNSET, envcontext
from pre_commit.languages.r import in_env, _path_rscript_exec

@pytest.mark.parametrize(
    ('r_home_env', 'rscript_path_expected'),
    [('/usr/bin/local', '/usr/bin/local/Rscript'),
     (UNSET, 'location is os specific: get it dynamically')],
)
def test_path_rscript_exec(r_home_env, rscript_path_expected):
    with in_env(hook.prefix, hook.language_version):  # `hook` from the surrounding test setup
        with envcontext((('R_HOME', r_home_env),)):
            assert _path_rscript_exec() == rscript_path_expected
```
If it gives an error or takes too long, I can figure that out later.
lorenzwalthert: Actually we should not really need `with in_env…` | diff --git a/pre_commit/languages/r.py b/pre_commit/languages/r.py
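A minimal sketch of those tests without the `in_env` wrapper, per the comment above (names follow the current module layout; the merged tests may differ):
```python
import os.path

from pre_commit import envcontext
from pre_commit.languages import r


def test_rscript_exec_relative_to_r_home():
    # With R_HOME set, Rscript should resolve to $R_HOME/bin/Rscript.
    with envcontext.envcontext((('R_HOME', 'r_home_dir'),)):
        assert r._rscript_exec() == os.path.join('r_home_dir', 'bin', 'Rscript')


def test_rscript_exec_no_r_home_set():
    # Without R_HOME, fall back to plain `Rscript` on the PATH.
    with envcontext.envcontext((('R_HOME', envcontext.UNSET),)):
        assert r._rscript_exec() == 'Rscript'
```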
index 9b32b2d..c736b38 100644
--- a/pre_commit/languages/r.py
+++ b/pre_commit/languages/r.py
@@ -59,7 +59,11 @@ def _prefix_if_non_local_file_entry(
def _rscript_exec() -> str:
- return os.path.join(os.getenv('R_HOME', ''), 'Rscript')
+ r_home = os.environ.get('R_HOME')
+ if r_home is None:
+ return 'Rscript'
+ else:
+ return os.path.join(r_home, 'bin', 'Rscript')
def _entry_validate(entry: Sequence[str]) -> None:
| RScript location not always correct
This rarely comes up because most people (in my experience) don't explicitly set R_HOME in regular terminals, but do have `RScript` on the PATH. The RStudio-supplied terminal, however, does explicitly set R_HOME, which then causes the hook installs to break on some platforms (e.g., AWS Ubuntu is where I encountered the problem).
While some distributions add symlinks, the default install location for `RScript` is `$R_HOME/bin/Rscript`:
https://cran.r-project.org/doc/manuals/r-patched/R-admin.html#Installation
I believe it can be fixed in just one spot:
https://github.com/pre-commit/pre-commit/blob/354b900f15e88a06ce8493e0316c288c44777017/pre_commit/languages/r.py#L62
Just adding `bin` to that path join should solve the issue. | pre-commit/pre-commit | diff --git a/tests/languages/r_test.py b/tests/languages/r_test.py
index bc302a7..5bc63b2 100644
--- a/tests/languages/r_test.py
+++ b/tests/languages/r_test.py
@@ -4,6 +4,7 @@ import os.path
import pytest
+from pre_commit import envcontext
from pre_commit.languages import r
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
@@ -129,3 +130,14 @@ def test_r_parsing_file_local(tempdir_factory, store):
config=config,
expect_path_prefix=False,
)
+
+
+def test_rscript_exec_relative_to_r_home():
+ expected = os.path.join('r_home_dir', 'bin', 'Rscript')
+ with envcontext.envcontext((('R_HOME', 'r_home_dir'),)):
+ assert r._rscript_exec() == expected
+
+
+def test_path_rscript_exec_no_r_home_set():
+ with envcontext.envcontext((('R_HOME', envcontext.UNSET),)):
+ assert r._rscript_exec() == 'Rscript'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 2.17 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cfgv==3.4.0
covdefaults==2.3.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
identify==2.6.9
iniconfig==2.1.0
nodeenv==1.9.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
-e git+https://github.com/pre-commit/pre-commit.git@934afb85a4a14a6adea62e694e631845edfb7132#egg=pre_commit
pytest==8.3.5
pytest-env==1.1.5
PyYAML==6.0.2
re-assert==1.1.0
regex==2024.11.6
toml==0.10.2
tomli==2.2.1
virtualenv==20.29.3
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cfgv==3.4.0
- covdefaults==2.3.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- identify==2.6.9
- iniconfig==2.1.0
- nodeenv==1.9.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- pytest==8.3.5
- pytest-env==1.1.5
- pyyaml==6.0.2
- re-assert==1.1.0
- regex==2024.11.6
- toml==0.10.2
- tomli==2.2.1
- virtualenv==20.29.3
prefix: /opt/conda/envs/pre-commit
| [
"tests/languages/r_test.py::test_rscript_exec_relative_to_r_home"
] | [] | [
"tests/languages/r_test.py::test_r_parsing_file_no_opts_no_args",
"tests/languages/r_test.py::test_r_parsing_file_opts_no_args",
"tests/languages/r_test.py::test_r_parsing_file_no_opts_args",
"tests/languages/r_test.py::test_r_parsing_expr_no_opts_no_args1",
"tests/languages/r_test.py::test_r_parsing_expr_no_opts_no_args2",
"tests/languages/r_test.py::test_r_parsing_expr_opts_no_args2",
"tests/languages/r_test.py::test_r_parsing_expr_args_in_entry2",
"tests/languages/r_test.py::test_r_parsing_expr_non_Rscirpt",
"tests/languages/r_test.py::test_r_parsing_file_local",
"tests/languages/r_test.py::test_path_rscript_exec_no_r_home_set"
] | [] | MIT License | 12,478 | 170 | [
"pre_commit/languages/r.py"
] |
fsspec__filesystem_spec-930 | dcd8b22a72c83481d40ab3edbe390ee3fa14ec69 | 2022-03-25 13:45:14 | dcd8b22a72c83481d40ab3edbe390ee3fa14ec69 | diff --git a/fsspec/mapping.py b/fsspec/mapping.py
index d6346f4..8bad2de 100644
--- a/fsspec/mapping.py
+++ b/fsspec/mapping.py
@@ -187,7 +187,7 @@ def maybe_convert(value):
def get_mapper(
- url,
+ url="",
check=False,
create=False,
missing_exceptions=None,
diff --git a/fsspec/spec.py b/fsspec/spec.py
index fa8feb3..7f9a241 100644
--- a/fsspec/spec.py
+++ b/fsspec/spec.py
@@ -1153,7 +1153,7 @@ class AbstractFileSystem(metaclass=_Cached):
# all instances already also derive from pyarrow
return self
- def get_mapper(self, root, check=False, create=False):
+ def get_mapper(self, root="", check=False, create=False):
"""Create key/value store based on this file-system
Makes a MutableMapping interface to the FS at the given root path.
| Make `url` in `get_mapper` optional
Currently when using a reference filesystem, users need to always provide an empty string `""` or `/` as the root url. Is there any reason not to set `""` or `/` as the default, so users can just do `fs.get_mapper()`? | fsspec/filesystem_spec | diff --git a/fsspec/implementations/tests/test_archive.py b/fsspec/implementations/tests/test_archive.py
index 816f826..abc6d35 100644
--- a/fsspec/implementations/tests/test_archive.py
+++ b/fsspec/implementations/tests/test_archive.py
@@ -249,7 +249,7 @@ class TestAnyArchive:
def test_mapping(self, scenario: ArchiveTestScenario):
with scenario.provider(archive_data) as archive:
fs = fsspec.filesystem(scenario.protocol, fo=archive)
- m = fs.get_mapper("")
+ m = fs.get_mapper()
assert list(m) == ["a", "b", "deeply/nested/path"]
assert m["b"] == archive_data["b"]
diff --git a/fsspec/implementations/tests/test_memory.py b/fsspec/implementations/tests/test_memory.py
index c647fac..7cf29e4 100644
--- a/fsspec/implementations/tests/test_memory.py
+++ b/fsspec/implementations/tests/test_memory.py
@@ -9,7 +9,7 @@ def test_1(m):
files = m.find("")
assert files == ["/afiles/and/another", "/somefile"]
- files = sorted(m.get_mapper("/"))
+ files = sorted(m.get_mapper())
assert files == ["afiles/and/another", "somefile"]
diff --git a/fsspec/tests/test_mapping.py b/fsspec/tests/test_mapping.py
index 71de1e0..ed5a3b0 100644
--- a/fsspec/tests/test_mapping.py
+++ b/fsspec/tests/test_mapping.py
@@ -5,6 +5,7 @@ import sys
import pytest
import fsspec
+from fsspec.implementations.local import LocalFileSystem
from fsspec.implementations.memory import MemoryFileSystem
@@ -143,3 +144,8 @@ def test_setitem_numpy():
dtype="<m8[ns]",
) # timedelta64 scalar
assert m["c"] == b',M"\x9e\xc6\x99A\x065\x1c\xf0Rn4\xcb+'
+
+
+def test_empty_url():
+ m = fsspec.get_mapper()
+ assert isinstance(m.fs, LocalFileSystem)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 2022.02 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-asyncio",
"pytest-benchmark",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
-e git+https://github.com/fsspec/filesystem_spec.git@dcd8b22a72c83481d40ab3edbe390ee3fa14ec69#egg=fsspec
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
py-cpuinfo==9.0.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-mock==3.14.0
tomli==2.2.1
typing_extensions==4.13.0
| name: filesystem_spec
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- fsspec==2022.2.0+9.gdcd8b22
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- tomli==2.2.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/filesystem_spec
| [
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_mapping[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_mapping[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_mapping[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_mapping[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_mapping[tar-xz]",
"fsspec/implementations/tests/test_memory.py::test_1",
"fsspec/tests/test_mapping.py::test_empty_url"
] | [] | [
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_repr[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_empty[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_glob[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_pickle[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_all_dirnames[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_ls[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_find[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_walk[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_info[zip]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[zip-128]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[zip-512]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[zip-4096]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_repr[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_empty[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_glob[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_pickle[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_all_dirnames[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_ls[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_find[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_walk[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_info[tar]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-128]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-512]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-4096]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_repr[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_empty[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_glob[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_pickle[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_all_dirnames[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_ls[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_find[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_walk[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_info[tar-gz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-gz-128]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-gz-512]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-gz-4096]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_repr[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_empty[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_glob[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_pickle[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_all_dirnames[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_ls[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_find[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_walk[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_info[tar-bz2]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-bz2-128]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-bz2-512]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-bz2-4096]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_repr[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_empty[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_glob[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_pickle[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_all_dirnames[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_ls[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_find[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_walk[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_info[tar-xz]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-xz-128]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-xz-512]",
"fsspec/implementations/tests/test_archive.py::TestAnyArchive::test_isdir_isfile[tar-xz-4096]",
"fsspec/implementations/tests/test_memory.py::test_strip",
"fsspec/implementations/tests/test_memory.py::test_put_single",
"fsspec/implementations/tests/test_memory.py::test_ls",
"fsspec/implementations/tests/test_memory.py::test_directories",
"fsspec/implementations/tests/test_memory.py::test_mv_recursive",
"fsspec/implementations/tests/test_memory.py::test_rm_no_psuedo_dir",
"fsspec/implementations/tests/test_memory.py::test_rewind",
"fsspec/implementations/tests/test_memory.py::test_empty_raises",
"fsspec/implementations/tests/test_memory.py::test_dir_errors",
"fsspec/implementations/tests/test_memory.py::test_no_rewind_append_mode",
"fsspec/implementations/tests/test_memory.py::test_moves",
"fsspec/implementations/tests/test_memory.py::test_rm_reursive_empty_subdir",
"fsspec/implementations/tests/test_memory.py::test_seekable",
"fsspec/implementations/tests/test_memory.py::test_remove_all",
"fsspec/tests/test_mapping.py::test_mapping_prefix",
"fsspec/tests/test_mapping.py::test_getitems_errors",
"fsspec/tests/test_mapping.py::test_ops",
"fsspec/tests/test_mapping.py::test_pickle",
"fsspec/tests/test_mapping.py::test_keys_view",
"fsspec/tests/test_mapping.py::test_multi",
"fsspec/tests/test_mapping.py::test_setitem_types"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,495 | 253 | [
"fsspec/mapping.py",
"fsspec/spec.py"
] |
|
reata__sqllineage-241 | bef4d3a701a6f0bbca0ff5f793cf4e282c4dfed4 | 2022-03-28 11:05:54 | c6a470135a4a2633b0f217ab147efebd727e9f75 | codecov[bot]: # [Codecov](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) Report
> Merging [#241](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) (cccd4db) into [master](https://codecov.io/gh/reata/sqllineage/commit/bef4d3a701a6f0bbca0ff5f793cf4e282c4dfed4?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) (bef4d3a) will **increase** coverage by `0.00%`.
> The diff coverage is `100.00%`.
```diff
@@ Coverage Diff @@
## master #241 +/- ##
=======================================
Coverage 99.01% 99.01%
=======================================
Files 20 20
Lines 915 918 +3
=======================================
+ Hits 906 909 +3
Misses 9 9
```
| [Impacted Files](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None) | Coverage Δ | |
|---|---|---|
| [sqllineage/core/models.py](https://codecov.io/gh/reata/sqllineage/pull/241/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-c3FsbGluZWFnZS9jb3JlL21vZGVscy5weQ==) | `100.00% <100.00%> (ø)` | |
| [sqllineage/utils/sqlparse.py](https://codecov.io/gh/reata/sqllineage/pull/241/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None#diff-c3FsbGluZWFnZS91dGlscy9zcWxwYXJzZS5weQ==) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None). Last update [bef4d3a...cccd4db](https://codecov.io/gh/reata/sqllineage/pull/241?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=None).
| diff --git a/sqllineage/core/models.py b/sqllineage/core/models.py
index 9dd1821..dc12336 100644
--- a/sqllineage/core/models.py
+++ b/sqllineage/core/models.py
@@ -288,11 +288,14 @@ class Column:
]
elif isinstance(token, Identifier):
real_name = token.get_real_name()
- if real_name is None or (
- real_name == "decimal" and isinstance(token.tokens[-1], Function)
- ):
+ if (
# real name is None: col1=1 AS int
+ real_name is None
# real_name is decimal: case when col1 > 0 then col2 else col3 end as decimal(18, 0)
+ or (real_name == "decimal" and isinstance(token.tokens[-1], Function))
+ # real_name is cast: cast(col1 AS string) AS string
+ or (real_name == "cast" and isinstance(token.tokens[0], Function))
+ ):
source_columns = [
cqt
for tk in token.get_sublists()
diff --git a/sqllineage/utils/sqlparse.py b/sqllineage/utils/sqlparse.py
index 0f5d322..c55aac0 100644
--- a/sqllineage/utils/sqlparse.py
+++ b/sqllineage/utils/sqlparse.py
@@ -17,16 +17,21 @@ from sqlparse.utils import recurse
from sqllineage.utils.entities import SubQueryTuple
+def get_innermost_parenthesis(token: Parenthesis):
+ # in case of subquery in nested parenthesis, find the innermost one first
+ while True:
+ idx, sub_paren = token.token_next_by(i=Parenthesis)
+ if sub_paren is not None and idx == 1:
+ token = sub_paren
+ else:
+ break
+ return token
+
+
def is_subquery(token: TokenList) -> bool:
flag = False
if isinstance(token, Parenthesis):
- # in case of subquery in nested parenthesis, find the innermost one first
- while True:
- idx, sub_paren = token.token_next_by(i=Parenthesis)
- if sub_paren is not None and idx == 1:
- token = sub_paren
- else:
- break
+ token = get_innermost_parenthesis(token)
# check if innermost parenthesis contains SELECT
_, sub_token = token.token_next_by(m=(DML, "SELECT"))
if sub_token is not None:
@@ -68,7 +73,9 @@ def get_subquery_parentheses(
elif is_subquery(tk):
subquery.append(SubQueryTuple(tk, token.get_real_name()))
if is_subquery(target):
- subquery = [SubQueryTuple(target, token.get_real_name())]
+ subquery = [
+ SubQueryTuple(get_innermost_parenthesis(target), token.get_real_name())
+ ]
return subquery
| Incorrect Column Lineage With Nested Cast
SQL:
```sql
INSERT OVERWRITE TABLE tab1
SELECT cast(cast(col1 AS int) AS int) AS col1
FROM tab2;
```
Result:
```bash
$ sqllineage -f foo.sql -l column
<default>.tab1.col1 <- <default>.tab2.cast
```
Expected:
```
<default>.tab1.col1 <- <default>.tab2.col1
```
Tested against v1.3.4, casting without nested structure works fine. | reata/sqllineage | diff --git a/tests/test_columns.py b/tests/test_columns.py
index 892034d..dc632c5 100644
--- a/tests/test_columns.py
+++ b/tests/test_columns.py
@@ -395,6 +395,16 @@ FROM (SELECT col1 FROM tab2)"""
)
+def test_select_column_in_subquery_with_two_parenthesis():
+ sql = """INSERT OVERWRITE TABLE tab1
+SELECT col1
+FROM ((SELECT col1 FROM tab2)) dt"""
+ assert_column_lineage_equal(
+ sql,
+ [(ColumnQualifierTuple("col1", "tab2"), ColumnQualifierTuple("col1", "tab1"))],
+ )
+
+
def test_select_column_from_table_join():
sql = """INSERT OVERWRITE TABLE tab1
SELECT tab2.col1,
@@ -567,6 +577,26 @@ FROM tab2"""
)
[email protected](
+ "dtype", ["string", "timestamp", "date", "datetime", "decimal(18, 0)"]
+)
+def test_nested_cast_to_data_type(dtype):
+ sql = f"""INSERT OVERWRITE TABLE tab1
+SELECT cast(cast(col1 AS {dtype}) AS {dtype}) AS col1
+FROM tab2"""
+ assert_column_lineage_equal(
+ sql,
+ [(ColumnQualifierTuple("col1", "tab2"), ColumnQualifierTuple("col1", "tab1"))],
+ )
+ sql = f"""INSERT OVERWRITE TABLE tab1
+SELECT cast(cast(cast(cast(cast(col1 AS {dtype}) AS {dtype}) AS {dtype}) AS {dtype}) AS {dtype}) AS col1
+FROM tab2"""
+ assert_column_lineage_equal(
+ sql,
+ [(ColumnQualifierTuple("col1", "tab2"), ColumnQualifierTuple("col1", "tab1"))],
+ )
+
+
@pytest.mark.parametrize(
"dtype", ["string", "timestamp", "date", "datetime", "decimal(18, 0)"]
)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y nodejs npm"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | blinker==1.9.0
click==8.1.8
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
Flask==3.1.0
flask-cors==5.0.1
importlib_metadata==8.6.1
iniconfig==2.1.0
itsdangerous==2.2.0
Jinja2==3.1.6
MarkupSafe==3.0.2
networkx==3.2.1
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
-e git+https://github.com/reata/sqllineage.git@bef4d3a701a6f0bbca0ff5f793cf4e282c4dfed4#egg=sqllineage
sqlparse==0.5.3
tomli==2.2.1
typing_extensions==4.13.0
Werkzeug==3.1.3
zipp==3.21.0
| name: sqllineage
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- blinker==1.9.0
- click==8.1.8
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- flask==3.1.0
- flask-cors==5.0.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- itsdangerous==2.2.0
- jinja2==3.1.6
- markupsafe==3.0.2
- networkx==3.2.1
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- sqlparse==0.5.3
- tomli==2.2.1
- typing-extensions==4.13.0
- werkzeug==3.1.3
- zipp==3.21.0
prefix: /opt/conda/envs/sqllineage
| [
"tests/test_columns.py::test_select_column_in_subquery_with_two_parenthesis",
"tests/test_columns.py::test_nested_cast_to_data_type[string]",
"tests/test_columns.py::test_nested_cast_to_data_type[timestamp]",
"tests/test_columns.py::test_nested_cast_to_data_type[date]",
"tests/test_columns.py::test_nested_cast_to_data_type[datetime]",
"tests/test_columns.py::test_nested_cast_to_data_type[decimal(18,"
] | [
"tests/test_columns.py::test_select_column_using_window_function_with_parameters"
] | [
"tests/test_columns.py::test_select_column",
"tests/test_columns.py::test_select_column_wildcard",
"tests/test_columns.py::test_select_column_using_function",
"tests/test_columns.py::test_select_column_using_function_with_complex_parameter",
"tests/test_columns.py::test_select_column_using_window_function",
"tests/test_columns.py::test_select_column_using_expression",
"tests/test_columns.py::test_select_column_using_expression_in_parenthesis",
"tests/test_columns.py::test_select_column_using_boolean_expression_in_parenthesis",
"tests/test_columns.py::test_select_column_using_expression_with_table_qualifier_without_column_alias",
"tests/test_columns.py::test_select_column_using_case_when",
"tests/test_columns.py::test_select_column_using_case_when_with_subquery",
"tests/test_columns.py::test_select_column_with_table_qualifier",
"tests/test_columns.py::test_select_columns",
"tests/test_columns.py::test_select_column_in_subquery",
"tests/test_columns.py::test_select_column_from_table_join",
"tests/test_columns.py::test_select_column_without_table_qualifier_from_table_join",
"tests/test_columns.py::test_select_column_from_same_table_multiple_time_using_different_alias",
"tests/test_columns.py::test_comment_after_column_comma_first",
"tests/test_columns.py::test_comment_after_column_comma_last",
"tests/test_columns.py::test_cast_with_comparison",
"tests/test_columns.py::test_cast_to_data_type[string]",
"tests/test_columns.py::test_cast_to_data_type[timestamp]",
"tests/test_columns.py::test_cast_to_data_type[date]",
"tests/test_columns.py::test_cast_to_data_type[datetime]",
"tests/test_columns.py::test_cast_to_data_type[decimal(18,",
"tests/test_columns.py::test_cast_to_data_type_with_case_when[string]",
"tests/test_columns.py::test_cast_to_data_type_with_case_when[timestamp]",
"tests/test_columns.py::test_cast_to_data_type_with_case_when[date]",
"tests/test_columns.py::test_cast_to_data_type_with_case_when[datetime]",
"tests/test_columns.py::test_cast_to_data_type_with_case_when[decimal(18,",
"tests/test_columns.py::test_cast_using_constant",
"tests/test_columns.py::test_window_function_in_subquery",
"tests/test_columns.py::test_invalid_syntax_as_without_alias",
"tests/test_columns.py::test_column_reference_from_cte_using_alias",
"tests/test_columns.py::test_column_reference_from_cte_using_qualifier",
"tests/test_columns.py::test_column_reference_from_previous_defined_cte",
"tests/test_columns.py::test_multiple_column_references_from_previous_defined_cte",
"tests/test_columns.py::test_column_reference_with_ansi89_join",
"tests/test_columns.py::test_smarter_column_resolution_using_query_context",
"tests/test_columns.py::test_column_reference_using_union",
"tests/test_columns.py::test_column_lineage_multiple_paths_for_same_column"
] | [] | MIT License | 12,512 | 703 | [
"sqllineage/core/models.py",
"sqllineage/utils/sqlparse.py"
] |
lbl-anp__becquerel-328 | 479a264362d4127f339292f4f8d21a6335a6903b | 2022-03-28 20:06:51 | e0c52f1593e1908a04c14891393121d146862a6f | markbandstra: This PR is failing due to an issue I saw locally. I figured it was an error in `pre-commit`, `black`, or `click` but could not fix it and bypassed it. Here is the relevant error:
```
black....................................................................Failed
- hook id: black
- exit code: 1
Traceback (most recent call last):
File "/home/runner/.cache/pre-commit/repoxi42o_5z/py_env-python3.9/bin/black", line 8, in <module>
sys.exit(patched_main())
File "/home/runner/.cache/pre-commit/repoxi42o_5z/py_env-python3.9/lib/python3.9/site-packages/black/__init__.py", line 1423, in patched_main
patch_click()
File "/home/runner/.cache/pre-commit/repoxi42o_5z/py_env-python3.9/lib/python3.9/site-packages/black/__init__.py", line 1409, in patch_click
from click import _unicodefun
ImportError: cannot import name '_unicodefun' from 'click' (/home/runner/.cache/pre-commit/repoxi42o_5z/py_env-python3.9/lib/python3.9/site-packages/click/__init__.py)
```
jvavrek: I don't see the error on my machine:
- Python 3.8.10
- black 22.1.0
- click 8.0.1
jccurtis: @jvavrek yep I don't either BUT `click` was just updated a few hours ago: https://pypi.org/project/click/8.1.0/
markbandstra: Good point, here is my machine:
* Python 3.9.12
* pre-commit 2.17.0
* black 22.1.0
* click 8.0.4
jccurtis: Confirmed on a fresh install:
```bash
# Inside bq repo
conda create -n bq-test python=3.9
conda activate bq-test
pip install -r requirements-dev.txt
pip install -e .
pre-commit install
pre-commit run --all
```
gives:
```txt
❯ pre-commit run --all
Trim Trailing Whitespace.................................................Passed
Fix End of Files.........................................................Passed
Check for added large files..............................................Passed
Check JSON...........................................(no files to check)Skipped
Check Toml...............................................................Passed
Check Yaml...............................................................Passed
Check for merge conflicts................................................Passed
Check Yaml...............................................................Passed
Mixed line ending........................................................Passed
Fix requirements.txt.....................................................Passed
black....................................................................Failed
- hook id: black
- exit code: 1
Traceback (most recent call last):
File "<removed>/py_env-python3.9/bin/black", line 8, in <module>
sys.exit(patched_main())
File "<removed>/py_env-python3.9/lib/python3.9/site-packages/black/__init__.py", line 1423, in patched_main
patch_click()
File "<removed>/py_env-python3.9/lib/python3.9/site-packages/black/__init__.py", line 1409, in patch_click
from click import _unicodefun
ImportError: cannot import name '_unicodefun' from 'click' (<removed>/py_env-python3.9/lib/python3.9/site-packages/click/__init__.py)
autoflake................................................................Passed
flake8...................................................................Passed
markdownlint-fix.........................................................Passed
prettier.................................................................Passed
```
markbandstra: Maybe a Python3.9 issue? That's a commonality between my machine and the failing test runner.
jccurtis: @markbandstra I think so
jccurtis: This also occurs on `python3.10` with the same steps
jvavrek: > I just upgraded to click 8.0.4 (latest available on `conda-forge`) and I still don't see the error.
Scratch this, my `pre-commit` was skipping a bunch of stuff. I can replicate the error with
- Python 3.8.10
- black, 22.1.0 (compiled: no)
- pre-commit 2.12.0
- click 8.0.1
jccurtis: FYI This is supposedly fixed but I do not see the fix with the most updated packages: https://github.com/psf/black/pull/2966
```txt
❯ pip freeze
algopy==0.5.7
asteval==0.9.26
attrs==21.4.0
beautifulsoup4==4.10.0
-e git+https://github.com/lbl-anp/becquerel.git@62ddabd2db84d1e67fc1ad1f80433e13f0785b39#egg=becquerel
black==22.3.0
bump2version==1.0.1
certifi==2020.6.20
cfgv==3.3.1
charset-normalizer==2.0.12
click==8.1.0
coverage==6.3.2
cycler==0.11.0
distlib==0.3.4
filelock==3.6.0
flake8==4.0.1
fonttools==4.31.2
future==0.18.2
h5py==3.6.0
html5lib==1.1
identify==2.4.12
idna==3.3
iminuit==2.11.2
iniconfig==1.1.1
kiwisolver==1.4.2
llvmlite==0.38.0
lmfit==1.0.3
lxml==4.8.0
matplotlib==3.5.1
mccabe==0.6.1
mypy-extensions==0.4.3
nodeenv==1.6.0
numba==0.55.1
numdifftools==0.9.40
numpy==1.21.5
packaging==21.3
pandas==1.4.1
pathspec==0.9.0
patsy==0.5.2
Pillow==9.0.1
platformdirs==2.5.1
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
pycodestyle==2.8.0
pyflakes==2.4.0
pyparsing==3.0.7
pytest==6.2.5
pytest-black==0.3.12
pytest-cov==3.0.0
pytest-rerunfailures==10.2
python-dateutil==2.8.2
pytz==2022.1
PyYAML==6.0
requests==2.27.1
scipy==1.8.0
six==1.16.0
soupsieve==2.3.1
statsmodels==0.13.2
toml==0.10.2
tomli==2.0.1
uncertainties==3.1.6
urllib3==1.26.9
virtualenv==20.14.0
webencodings==0.5.1
```
jvavrek: Comments in that psf/black link suggest 8.0.1 should work, so it's strange that I'm still seeing it with click==8.0.1
jccurtis: @jvavrek yep I see this with `click==8.0.1` and `click==8.0.4`. For reference:
```txt
black==22.3.0
pre-commit==2.17.0
```
(same as the `pip freeze` from above)
micahfolsom: @jccurtis Not sure what's going on there - bumping black to 22.3 fixed the issue for me, both locally, and in the pre-commit config.
jccurtis: @micahfolsom beat me to it ... I was writing the PR 🤣: #329
The issue in the testing above was a bit of a 🤦 ... we weren't updating the `pre-commit` config
jccurtis: @micahfolsom let's get #329 in first. It makes the changelog easier to read 😄
markbandstra: Great find @micahfolsom ! | diff --git a/becquerel/tools/materials_compendium.py b/becquerel/tools/materials_compendium.py
index af5988b..0e2af69 100644
--- a/becquerel/tools/materials_compendium.py
+++ b/becquerel/tools/materials_compendium.py
@@ -18,7 +18,7 @@ import os
import warnings
import numpy as np
import pandas as pd
-from .materials_error import MaterialsWarning
+from .materials_error import MaterialsWarning, MaterialsError
FNAME = os.path.join(os.path.split(__file__)[0], "MaterialsCompendium.json")
@@ -60,6 +60,23 @@ def fetch_compendium_data():
data = json.load(f)
# extract relevant data
+ if isinstance(data, list):
+ print("Pre-March 2022 JSON detected")
+ elif isinstance(data, dict):
+ print("Post-March 2022 JSON detected")
+ if "siteVersion" not in data.keys() or "data" not in data.keys():
+ raise MaterialsError(
+ "Attempt to read Compendium JSON failed; "
+ "dictionary must have keys 'siteVersion' "
+ "and 'data' but have keys " + str(list(data.keys()))
+ )
+ print(f"Compendium data - site version: {data['siteVersion']}")
+ data = data["data"]
+ else:
+ raise MaterialsError(
+ "Attempt to read Compendium JSON failed; "
+ "object must be a list or dict but is a " + str(type(data))
+ )
names = [datum["Name"] for datum in data]
formulae = [datum["Formula"] if "Formula" in datum else "-" for datum in data]
densities = [datum["Density"] for datum in data]
| Latest Compendium materials JSON causes errors
The latest version of the materials data from the [PNNL Compendium](https://compendium.cwmd.pnnl.gov) cannot be parsed by the current version of `becquerel`.
Downloading the latest JSON and placing it into `becquerel/tools` and calling `bq.tools.force_load_and_write_materials_csv()` causes the following error:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File ".../becquerel/tools/materials.py", line 196, in force_load_and_write_materials_csv
materials = _load_and_compile_materials()
File ".../becquerel/tools/materials.py", line 26, in _load_and_compile_materials
data_comp = fetch_compendium_data()
File ".../becquerel/tools/materials_compendium.py", line 63, in fetch_compendium_data
names = [datum["Name"] for datum in data]
File ".../becquerel/tools/materials_compendium.py", line 63, in <listcomp>
names = [datum["Name"] for datum in data]
TypeError: string indices must be integers
```
It seems from looking at a JSON from a few months ago and a JSON downloaded today that the difference is that the older JSON has a structure that parses into a `list` of `dict`s:
```json
[
{
"Comment": "Material 1 data",
},
{
"Comment": "Material 2 data",
}
]
```
whereas the new format parses as a `dict` and the list of material data is inside of it:
```json
{
"siteVersion": "0.1.1",
"data": [
{
"Comment": "Material 1 data",
},
{
"Comment": "Material 2 data",
}
]
}
```
So the solution is easy: check the type of the loaded data and, if it is in the new format, use `data["data"]`. For example:
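A minimal sketch of that check (illustrative only; the actual fix in `becquerel` may differ):
```python
import json

def fetch_compendium_records(fname):
    """Return the list of material records from either JSON layout."""
    with open(fname, "r") as f:
        data = json.load(f)
    # Post-March 2022 files wrap the material list in
    # {"siteVersion": ..., "data": [...]}; older files are the list itself.
    if isinstance(data, dict):
        data = data["data"]
    return data
```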
Both versions appear to contain 411 materials, but I have not checked whether any of the data for individual materials differ. | lbl-anp/becquerel | diff --git a/tests/materials_test.py b/tests/materials_test.py
index ced6db6..6fe3a4f 100644
--- a/tests/materials_test.py
+++ b/tests/materials_test.py
@@ -87,8 +87,11 @@ def test_materials_dummy_csv():
@pytest.mark.webtest
-def test_materials_dummy_compendium():
- """Test fetch_materials with a dummy Compendium JSON file."""
+def test_materials_dummy_compendium_pre2022():
+ """Test fetch_materials with a dummy Compendium JSON file.
+
+ The dummy JSON file uses the format seen prior to March 2022.
+ """
# point to an generate a dummy JSON file
fname_orig = materials_compendium.FNAME
materials_compendium.FNAME = fname_orig[:-5] + "_dummy.json"
@@ -128,6 +131,79 @@ def test_materials_dummy_compendium():
materials_compendium.FNAME = fname_orig
[email protected]
+def test_materials_dummy_compendium_2022():
+ """Test fetch_materials with a dummy Compendium JSON file.
+
+ The dummy JSON file uses the format first seen in March 2022.
+ """
+ # point to an generate a dummy JSON file
+ fname_orig = materials_compendium.FNAME
+ materials_compendium.FNAME = fname_orig[:-5] + "_dummy.json"
+ data = {
+ "siteVersion": "0.0.0",
+ "data": [
+ {
+ "Density": 8.4e-5,
+ "Elements": [
+ {
+ "AtomFraction_whole": 1.0,
+ "Element": "H",
+ "WeightFraction_whole": 1.0,
+ }
+ ],
+ "Formula": "H2",
+ "Name": "Hydrogen",
+ },
+ {
+ "Density": 1.16e-3,
+ "Elements": [
+ {
+ "AtomFraction_whole": 1.0,
+ "Element": "N",
+ "WeightFraction_whole": 1.0,
+ }
+ ],
+ "Formula": "N2",
+ "Name": "Nitrogen",
+ },
+ ],
+ }
+ with open(materials_compendium.FNAME, "w") as f:
+ json.dump(data, f, indent=4)
+ with pytest.warns(None) as record:
+ materials._load_and_compile_materials()
+ assert len(record) == 0, "Expected no MaterialsWarnings to be raised"
+ # remove siteVersion and make sure there is an error raised
+ del data["siteVersion"]
+ with open(materials_compendium.FNAME, "w") as f:
+ json.dump(data, f, indent=4)
+ with pytest.raises(MaterialsError):
+ materials._load_and_compile_materials()
+ # remove the dummy file and point back to original
+ os.remove(materials_compendium.FNAME)
+ materials_compendium.FNAME = fname_orig
+
+
[email protected]
+def test_materials_dummy_compendium_error():
+ """Test fetch_materials with a dummy Compendium JSON file.
+
+ The dummy JSON file returns something that is not a list or dict.
+ """
+ # point to an generate a dummy JSON file
+ fname_orig = materials_compendium.FNAME
+ materials_compendium.FNAME = fname_orig[:-5] + "_dummy.json"
+ data = None
+ with open(materials_compendium.FNAME, "w") as f:
+ json.dump(data, f, indent=4)
+ with pytest.raises(MaterialsError):
+ materials._load_and_compile_materials()
+ # remove the dummy file and point back to original
+ os.remove(materials_compendium.FNAME)
+ materials_compendium.FNAME = fname_orig
+
+
@pytest.mark.webtest
def test_materials_no_compendium():
"""Test fetch_materials with no Compendium JSON file."""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install pre-commit"
],
"python": "3.9",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.6
attrs==25.3.0
beautifulsoup4==4.13.3
-e git+https://github.com/lbl-anp/becquerel.git@479a264362d4127f339292f4f8d21a6335a6903b#egg=becquerel
black==25.1.0
bump2version==1.0.1
certifi==2025.1.31
cfgv==3.4.0
charset-normalizer==3.4.1
click==8.1.8
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
dill==0.3.9
distlib==0.3.9
filelock==3.18.0
flake8==7.2.0
fonttools==4.56.0
future==1.0.0
h5py==3.13.0
html5lib==1.1
identify==2.6.9
idna==3.10
iminuit==2.30.1
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
llvmlite==0.43.0
lmfit==1.3.3
lxml==5.3.1
matplotlib==3.9.4
mccabe==0.7.0
mypy-extensions==1.0.0
nodeenv==1.9.1
numba==0.60.0
numdifftools==0.9.41
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pathspec==0.12.1
pillow==11.1.0
platformdirs==4.3.7
pluggy==1.5.0
pre_commit==4.2.0
py==1.11.0
pycodestyle==2.13.0
pyflakes==3.3.2
pyparsing==3.2.3
pytest==6.2.5
pytest-black==0.4.0
pytest-cov==6.0.0
pytest-rerunfailures==12.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
scipy==1.13.1
six==1.17.0
soupsieve==2.6
toml==0.10.2
tomli==2.2.1
typing_extensions==4.13.0
tzdata==2025.2
uncertainties==3.2.2
urllib3==2.3.0
virtualenv==20.29.3
webencodings==0.5.1
zipp==3.21.0
| name: becquerel
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==1.0.6
- attrs==25.3.0
- beautifulsoup4==4.13.3
- becquerel==0.5.0
- black==25.1.0
- bump2version==1.0.1
- certifi==2025.1.31
- cfgv==3.4.0
- charset-normalizer==3.4.1
- click==8.1.8
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- dill==0.3.9
- distlib==0.3.9
- filelock==3.18.0
- flake8==7.2.0
- fonttools==4.56.0
- future==1.0.0
- h5py==3.13.0
- html5lib==1.1
- identify==2.6.9
- idna==3.10
- iminuit==2.30.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- llvmlite==0.43.0
- lmfit==1.3.3
- lxml==5.3.1
- matplotlib==3.9.4
- mccabe==0.7.0
- mypy-extensions==1.0.0
- nodeenv==1.9.1
- numba==0.60.0
- numdifftools==0.9.41
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pathspec==0.12.1
- pillow==11.1.0
- platformdirs==4.3.7
- pluggy==1.5.0
- pre-commit==4.2.0
- py==1.11.0
- pycodestyle==2.13.0
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==6.2.5
- pytest-black==0.4.0
- pytest-cov==6.0.0
- pytest-rerunfailures==12.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- scipy==1.13.1
- six==1.17.0
- soupsieve==2.6
- toml==0.10.2
- tomli==2.2.1
- typing-extensions==4.13.0
- tzdata==2025.2
- uncertainties==3.2.2
- urllib3==2.3.0
- virtualenv==20.29.3
- webencodings==0.5.1
- zipp==3.21.0
prefix: /opt/conda/envs/becquerel
| [
"tests/materials_test.py::test_materials_dummy_compendium_error"
] | [
"tests/materials_test.py::test_materials_force",
"tests/materials_test.py::test_materials_dummy_compendium_pre2022",
"tests/materials_test.py::test_materials_dummy_compendium_2022"
] | [
"tests/materials_test.py::BLACK",
"tests/materials_test.py::TestConvertComposition::test_success",
"tests/materials_test.py::TestConvertComposition::test_not_iterable",
"tests/materials_test.py::TestConvertComposition::test_not_string",
"tests/materials_test.py::TestConvertComposition::test_bad_line",
"tests/materials_test.py::TestConvertComposition::test_bad_z",
"tests/materials_test.py::TestConvertComposition::test_z_out_of_range",
"tests/materials_test.py::test_materials",
"tests/materials_test.py::test_materials_dummy_csv",
"tests/materials_test.py::test_materials_no_compendium",
"tests/materials_test.py::test_remove_materials_csv"
] | [] | BSD-3-Clause | 12,517 | 422 | [
"becquerel/tools/materials_compendium.py"
] |
stummjr__flake8-scrapy-19 | e09bcf1e387b52d081d2df4b6d6c459203b31a5b | 2022-03-29 17:45:55 | e09bcf1e387b52d081d2df4b6d6c459203b31a5b | diff --git a/finders/oldstyle.py b/finders/oldstyle.py
index 0910d97..dceef88 100644
--- a/finders/oldstyle.py
+++ b/finders/oldstyle.py
@@ -12,7 +12,7 @@ class UrlJoinIssueFinder(IssueFinder):
return
first_param = node.args[0]
- if not isinstance(first_param, ast.Attribute):
+ if not isinstance(first_param, ast.Attribute) or not isinstance(first_param.value, ast.Name):
return
if first_param.value.id == 'response' and first_param.attr == 'url':
diff --git a/flake8_scrapy.py b/flake8_scrapy.py
index c1198f4..839b2b8 100644
--- a/flake8_scrapy.py
+++ b/flake8_scrapy.py
@@ -6,7 +6,7 @@ from finders.domains import (
from finders.oldstyle import OldSelectorIssueFinder, UrlJoinIssueFinder
-__version__ = '0.0.1'
+__version__ = '0.0.2'
class ScrapyStyleIssueFinder(ast.NodeVisitor):
diff --git a/setup.py b/setup.py
index 4d058d5..3518539 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ with open('README.md', 'r') as f:
setuptools.setup(
name='flake8-scrapy',
license='MIT',
- version='0.0.1',
+ version='0.0.2',
long_description=long_description,
long_description_content_type='text/markdown',
author='Valdir Stumm Junior',
| Failed SCP03 rule check
When starting the flakeheaven stage in CI, I received the following error:
```
$ flakeheaven lint --format=grouped --exit-zero --import-order-style pep8 --application-import-names directories
multiprocessing.pool.RemoteTraceback:
"""
Traceback (most recent call last):
File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 44, in mapstar
return list(map(*args))
File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 687, in _run_checks
return checker.run_checks()
File "/usr/local/lib/python3.7/site-packages/flakeheaven/_patched/_checkers.py", line 282, in run_checks
return super().run_checks()
File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 597, in run_checks
self.run_ast_checks()
File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 500, in run_ast_checks
for (line_number, offset, text, _) in runner:
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 55, in run
finder.visit(self.tree)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit
self.visit(item)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit
self.visit(item)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 38, in visit_Assign
self.find_issues_visitor('Assign', node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 35, in find_issues_visitor
self.generic_visit(node)
File "/usr/local/lib/python3.7/ast.py", line 281, in generic_visit
self.visit(value)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 41, in visit_Call
self.find_issues_visitor('Call', node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 35, in find_issues_visitor
self.generic_visit(node)
File "/usr/local/lib/python3.7/ast.py", line 279, in generic_visit
self.visit(item)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/ast.py", line 281, in generic_visit
self.visit(value)
File "/usr/local/lib/python3.7/ast.py", line 271, in visit
return visitor(node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 41, in visit_Call
self.find_issues_visitor('Call', node)
File "/usr/local/lib/python3.7/site-packages/flake8_scrapy.py", line 34, in find_issues_visitor
self.issues.extend(list(issues))
File "/usr/local/lib/python3.7/site-packages/finders/oldstyle.py", line 18, in find_issues
if first_param.value.id == 'response' and first_param.attr == 'url':
AttributeError: 'Attribute' object has no attribute 'id'
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/bin/flakeheaven", line 8, in <module>
sys.exit(entrypoint())
File "/usr/local/lib/python3.7/site-packages/flakeheaven/_cli.py", line 40, in entrypoint
exit_code, msg = main(argv)
File "/usr/local/lib/python3.7/site-packages/flakeheaven/_cli.py", line 32, in main
return COMMANDS[command_name](argv=argv[1:])
File "/usr/local/lib/python3.7/site-packages/flakeheaven/commands/_lint.py", line 12, in lint_command
app.run(argv)
File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 375, in run
self._run(argv)
File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 364, in _run
self.run_checks()
File "/usr/local/lib/python3.7/site-packages/flake8/main/application.py", line 271, in run_checks
self.file_checker_manager.run()
File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 309, in run
self.run_parallel()
File "/usr/local/lib/python3.7/site-packages/flake8/checker.py", line 275, in run_parallel
for ret in pool_map:
File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 354, in <genexpr>
return (item for chunk in result for item in chunk)
File "/usr/local/lib/python3.7/multiprocessing/pool.py", line 748, in next
raise value
AttributeError: 'Attribute' object has no attribute 'id'
```
The problem occurs in this line:
```python
urljoin(settings.SERVICE_URLS.PD, '/path')
```
Here is how the `settings` object is defined:
```python
from pydantic import BaseSettings, BaseModel
class ServiceUrlsSchema(BaseModel):
PD: str
class Settings(BaseSettings):
SERVICE_URLS: ServiceUrlsSchema
```
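A minimal sketch of why the check crashes on this call (illustrative, not part of the original report): a dotted chain like `settings.SERVICE_URLS.PD` parses into nested `ast.Attribute` nodes, so `first_param.value` is an `Attribute` rather than a `Name` and has no `.id`.
```python
import ast

call = ast.parse("urljoin(settings.SERVICE_URLS.PD, '/path')", mode="eval").body
first_param = call.args[0]                 # ast.Attribute for `.PD`
print(type(first_param.value).__name__)    # "Attribute", not "Name"
# first_param.value.id therefore raises AttributeError, which is why the
# finder needs an isinstance(first_param.value, ast.Name) guard.
```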
| stummjr/flake8-scrapy | diff --git a/tests/test_oldstyle.py b/tests/test_oldstyle.py
index 29e9f50..6dc2d34 100644
--- a/tests/test_oldstyle.py
+++ b/tests/test_oldstyle.py
@@ -17,6 +17,8 @@ def test_finds_old_style_urljoin(code):
@pytest.mark.parametrize('code', [
('response.urljoin("/foo")'),
('url = urljoin()'),
+ ('urljoin(x, "/foo")'),
+ ('urljoin(x.y.z, "/foo")'),
])
def test_dont_find_old_style_urljoin(code):
issues = run_checker(code)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 3
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.7",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==24.2.0
certifi @ file:///croot/certifi_1671487769961/work/certifi
coverage==7.2.7
entrypoints==0.3
exceptiongroup==1.2.2
execnet==2.0.2
flake8==3.7.9
-e git+https://github.com/stummjr/flake8-scrapy.git@e09bcf1e387b52d081d2df4b6d6c459203b31a5b#egg=flake8_scrapy
importlib-metadata==6.7.0
iniconfig==2.0.0
mccabe==0.6.1
more-itertools==9.1.0
packaging==24.0
pluggy==0.13.1
py==1.11.0
pycodestyle==2.5.0
pyflakes==2.1.1
pytest==7.4.4
pytest-asyncio==0.21.2
pytest-cov==2.8.1
pytest-mock==3.11.1
pytest-xdist==3.5.0
six==1.11.0
tomli==2.0.1
typing_extensions==4.7.1
wcwidth==0.2.13
zipp==3.15.0
| name: flake8-scrapy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==24.2.0
- coverage==7.2.7
- entrypoints==0.3
- exceptiongroup==1.2.2
- execnet==2.0.2
- flake8==3.7.9
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- mccabe==0.6.1
- more-itertools==9.1.0
- packaging==24.0
- pluggy==0.13.1
- py==1.11.0
- pycodestyle==2.5.0
- pyflakes==2.1.1
- pytest==7.4.4
- pytest-asyncio==0.21.2
- pytest-cov==2.8.1
- pytest-mock==3.11.1
- pytest-xdist==3.5.0
- six==1.11.0
- tomli==2.0.1
- typing-extensions==4.7.1
- wcwidth==0.2.13
- zipp==3.15.0
prefix: /opt/conda/envs/flake8-scrapy
| [
"tests/test_oldstyle.py::test_dont_find_old_style_urljoin[urljoin(x.y.z,"
] | [] | [
"tests/test_oldstyle.py::test_finds_old_style_urljoin[urljoin(response.url,",
"tests/test_oldstyle.py::test_finds_old_style_urljoin[url",
"tests/test_oldstyle.py::test_dont_find_old_style_urljoin[response.urljoin(\"/foo\")]",
"tests/test_oldstyle.py::test_dont_find_old_style_urljoin[url",
"tests/test_oldstyle.py::test_dont_find_old_style_urljoin[urljoin(x,",
"tests/test_oldstyle.py::test_find_old_style_selector[sel"
] | [] | MIT License | 12,521 | 403 | [
"finders/oldstyle.py",
"flake8_scrapy.py",
"setup.py"
] |
|
RockefellerArchiveCenter__DACSspace-51 | 35d9e7686b660844a831391c4ac3d2fd3ce0be86 | 2022-03-29 18:54:24 | 35d9e7686b660844a831391c4ac3d2fd3ce0be86 | diff --git a/dacsspace/dacsspace.py b/dacsspace/dacsspace.py
index fcecea7..b953a5b 100644
--- a/dacsspace/dacsspace.py
+++ b/dacsspace/dacsspace.py
@@ -6,9 +6,10 @@ from .validator import Validator
class DACSspace:
"""Base DACSspace class. Fetches data from AS, validates and reports results."""
- def run(self, published_only, invalid_only):
+ def run(self, published_only, invalid_only,
+ schema_identifier='single_level_required.json', schema_filepath=None):
client = ArchivesSpaceClient()
- validator = Validator()
+ validator = Validator(schema_identifier, schema_filepath)
reporter = CSVReporter()
data = client.get_resources(published_only)
results = []
@@ -21,3 +22,6 @@ class DACSspace:
# These variables should eventually be passed as arguments in the command line
# published_only = False
# invalid_only = True
+# schema_identifier - should default to single_level_required.json
+# schema_filepath - should default to None, only one of schema_identifier
+# or schema_filepath allowed
diff --git a/dacsspace/validator.py b/dacsspace/validator.py
index 82cc9b3..3f4d8cb 100644
--- a/dacsspace/validator.py
+++ b/dacsspace/validator.py
@@ -1,14 +1,25 @@
import json
-from jsonschema import Draft7Validator
+from jsonschema import Draft202012Validator
class Validator:
"""Validates data from ArchivesSpace."""
- def __init__(self):
- with open("single_level_required.json", "r") as json_file:
+ def __init__(self, schema_identifier, schema_filepath):
+ """Loads and validates the schema from an identifier or filepath.
+
+ Args:
+ schema_identifier (str): a pointer to a schema that is part of
+ DACSspace, located in the `schemas` directory.
+ schema_filepath (str): a filepath pointing to an external schema.
+ """
+ self.validator = Draft202012Validator
+ if not schema_filepath:
+ schema_filepath = f"schemas/{schema_identifier.removesuffix('.json')}.json"
+ with open(schema_filepath, "r") as json_file:
self.schema = json.load(json_file)
+ self.validator.check_schema(self.schema)
def validate_data(self, data):
"""Validates data.
@@ -21,8 +32,7 @@ class Validator:
indication of the validation result and, if necessary, an explanation
of any validation errors. { "valid": False, "explanation": "You are missing the following fields..." }
"""
- schema = self.schema
- validator = Draft7Validator(schema)
+ validator = self.validator(self.schema)
errors_found = [error.message for error in validator.iter_errors(data)]
if len(errors_found):
return {"valid": False, "explanation": "\n".join(errors_found)}
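For illustration, the refactored `Validator` would be driven as below. This is a hypothetical usage sketch based only on the signature in the patch above, not code from the repository; the sample record fields are invented, and the identifier form assumes the process runs from the repository root so that the relative `schemas/` path resolves:
```python
from dacsspace.validator import Validator

# Use a schema bundled with DACSspace, referenced by identifier ...
validator = Validator("single_level_required.json", None)

# ... or point at an arbitrary external schema file instead (path is made up):
# validator = Validator(None, "/path/to/my_schema.json")

# Sample record fields below are invented for demonstration.
result = validator.validate_data({"id_0": "abc", "title": "Sample resource"})
print(result)  # e.g. {"valid": False, "explanation": "..."} when required fields are missing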
| Allow users to specify a schema to validate against
**Is your feature request related to a problem? Please describe.**
DACSspace will ship with a schema for the single-level minimum requirement and (maybe) a custom RAC schema. Users may also want to supply their own schema to validate against.
**Describe the solution you'd like**
Allow users to specify which schema they want to validate against. Ideally we should allow both:
- A pointer to an existing schema that is part of DACSspace (presuming there is more than one), somewhat like we do in `rac_schemas`: https://github.com/RockefellerArchiveCenter/rac_schemas/blob/base/rac_schemas/__init__.py#L49
- A schema as a file object or a filepath, which would allow users to create their own arbitrary schema. | RockefellerArchiveCenter/DACSspace | diff --git a/single_level_required.json b/schemas/single_level_required.json
similarity index 100%
rename from single_level_required.json
rename to schemas/single_level_required.json
diff --git a/tests/test_validator.py b/tests/test_validator.py
index 5b6176b..cc75b39 100644
--- a/tests/test_validator.py
+++ b/tests/test_validator.py
@@ -1,13 +1,40 @@
-# from unittest.mock import patch
-
-
import json
+import os
import unittest
+from jsonschema.exceptions import SchemaError
+
from dacsspace.validator import Validator
class TestValidator(unittest.TestCase):
+ def test_schema(self):
+ """Asserts schema identifiers and filenames are handled correctly."""
+
+ test_schema_filepath = "test_schema.json"
+
+ # handling for schema identifier
+ validator = Validator('single_level_required', None)
+ self.assertEqual(validator.schema["$id"], 'single_level_required.json')
+
+ validator = Validator('single_level_required.json', None)
+ self.assertEqual(validator.schema["$id"], 'single_level_required.json')
+
+ # passing external filename
+ with open(test_schema_filepath, "w") as sf:
+ json.dump({"$id": "test_schema.json"}, sf)
+ validator = Validator(None, test_schema_filepath)
+ self.assertEqual(validator.schema["$id"], test_schema_filepath)
+
+ # invalid external schema
+ with open(test_schema_filepath, "w") as sf:
+ json.dump({"type": 12}, sf)
+ with self.assertRaises(SchemaError):
+ validator = Validator(None, test_schema_filepath)
+
+ # cleanup
+ os.remove(test_schema_filepath)
+
def test_validator(self):
valid_json = "fixtures/valid_resource.json"
invalid_fixtures = [
@@ -15,12 +42,16 @@ class TestValidator(unittest.TestCase):
"fixtures/no_metadata_rights.json"]
with open(valid_json, 'r') as v:
valid_json = json.load(v)
- result = Validator().validate_data(valid_json)
+ result = Validator(
+ 'single_level_required',
+ None).validate_data(valid_json)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result["valid"], True)
for f in invalid_fixtures:
with open(f, 'r') as i:
invalid_json = json.load(i)
- result = Validator().validate_data(invalid_json)
+ result = Validator(
+ 'single_level_required',
+ None).validate_data(invalid_json)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result["valid"], False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | ArchivesSnake==0.9.1
attrs==21.4.0
boltons==21.0.0
certifi==2021.10.8
charset-normalizer==2.0.11
-e git+https://github.com/RockefellerArchiveCenter/DACSspace.git@35d9e7686b660844a831391c4ac3d2fd3ce0be86#egg=dacsspace
exceptiongroup==1.2.2
idna==3.3
iniconfig==2.1.0
jsonschema==4.4.0
more-itertools==8.12.0
packaging==24.2
pluggy==1.5.0
pyrsistent==0.18.1
pytest==8.3.5
PyYAML==6.0
rapidfuzz==2.0.5
requests==2.27.1
structlog==21.5.0
tomli==2.2.1
urllib3==1.26.8
| name: DACSspace
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- archivessnake==0.9.1
- attrs==21.4.0
- boltons==21.0.0
- certifi==2021.10.8
- charset-normalizer==2.0.11
- exceptiongroup==1.2.2
- idna==3.3
- iniconfig==2.1.0
- jsonschema==4.4.0
- more-itertools==8.12.0
- packaging==24.2
- pluggy==1.5.0
- pyrsistent==0.18.1
- pytest==8.3.5
- pyyaml==6.0
- rapidfuzz==2.0.5
- requests==2.27.1
- structlog==21.5.0
- tomli==2.2.1
- urllib3==1.26.8
prefix: /opt/conda/envs/DACSspace
| [
"tests/test_validator.py::TestValidator::test_schema",
"tests/test_validator.py::TestValidator::test_validator"
] | [] | [] | [] | MIT License | 12,522 | 698 | [
"dacsspace/dacsspace.py",
"dacsspace/validator.py"
] |
|
GenericMappingTools__pygmt-1857 | 61781e433986d02825c7d553bd7b9bced9de3c76 | 2022-03-30 14:33:36 | 1067fa316b214d470f26f4affba743a4835a53c8 | weiji14: I think I'll stop here for now to keep this PR fairly 'small'. There's still a list of other functions that need to be refactored to use `.get()`. Search for `" not in kwargs` and you'll find this list:
- [ ] meca
- [ ] nearneighbor
- [ ] sphdistance
- [ ] sphinterpolate
- [ ] surface
- [ ] text
- [ ] xyz2grd
Would someone else be willing to open a Part 2 PR for this?
meghanrjones: > Would someone else be willing to open a Part 2 PR for this?
I could open part 2.
weiji14: > > Would someone else be willing to open a Part 2 PR for this?
>
> I could open part 2.
Cool, I tried to use the walrus operator in this Part 1 PR but it didn't seem to work for the functions I was refactoring. Maybe you'll have better luck in Part 2 :wink:
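For context, the two idioms being weighed here differ as follows: `kwargs.get()` covers the plain presence-plus-not-`None` check, while the walrus operator only pays off where the fetched value is reused immediately, such as the temporary-output-grid pattern. A minimal illustrative sketch, not code from this PR:
```python
# Illustrative only -- not code from this PR (function names are invented).
def plain_check(**kwargs):
    # replaces: if "G" in kwargs: ...
    if kwargs.get("G") is not None:
        return f"outgrid set to {kwargs['G']}"
    return "no outgrid"


def walrus_check(tmpname, **kwargs):
    # replaces: if "G" not in kwargs: kwargs["G"] = tmpname; outgrid = kwargs["G"]
    if (outgrid := kwargs.get("G")) is None:  # Python 3.8+ assignment expression
        kwargs["G"] = outgrid = tmpname
    return outgrid


print(plain_check(G="out.nc"))              # outgrid set to out.nc
print(plain_check(G=None))                  # no outgrid
print(walrus_check("tmp.nc"))               # tmp.nc
print(walrus_check("tmp.nc", G="out.nc"))   # out.nc
```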
meghanrjones: > > > Would someone else be willing to open a Part 2 PR for this?
> >
> >
> > I could open part 2.
>
> Cool, I tried to use the walrus operator in this Part 1 PR but it didn't seem to work for the functions I was refactoring. Maybe you'll have better luck in Part 2 😉
Should I open the PR before or after this one is approved/merged?
weiji14: > > > > Would someone else be willing to open a Part 2 PR for this?
> > >
> > >
> > > I could open part 2.
> >
> >
> > Cool, I tried to use the walrus operator in this Part 1 PR but it didn't seem to work for the functions I was refactoring. Maybe you'll have better luck in Part 2 😉
>
> Should I open the PR before or after this one is approved/merged?
You can open the Part 2 PR now if you want, I think there shouldn't be any conflicts. Probably better to work on both in parallel too in case we notice anything wrong with the implementation in one or the other.
weiji14: /format
meghanrjones: > Just to be safe, let's wait until the Part 2 PR is opened and finalized before merging this into the main branch. I'll keep this PR in draft for now.
Working on it now!
meghanrjones: I am leaving meca out of part 2, since it is undergoing a complete refactor in https://github.com/GenericMappingTools/pygmt/pull/1784. | diff --git a/pygmt/src/grd2cpt.py b/pygmt/src/grd2cpt.py
index 3c20e8c8..13d45b90 100644
--- a/pygmt/src/grd2cpt.py
+++ b/pygmt/src/grd2cpt.py
@@ -160,14 +160,14 @@ def grd2cpt(grid, **kwargs):
``categorical=True``.
{V}
"""
- if "W" in kwargs and "Ww" in kwargs:
+ if kwargs.get("W") is not None and kwargs.get("Ww") is not None:
raise GMTInvalidInput("Set only categorical or cyclic to True, not both.")
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
- if "H" not in kwargs: # if no output is set
+ if kwargs.get("H") is None: # if no output is set
arg_str = build_arg_string(kwargs, infile=infile)
- if "H" in kwargs: # if output is set
+ else: # if output is set
outfile, kwargs["H"] = kwargs["H"], True
if not outfile or not isinstance(outfile, str):
raise GMTInvalidInput("'output' should be a proper file name.")
diff --git a/pygmt/src/grd2xyz.py b/pygmt/src/grd2xyz.py
index 54f03fdc..bb347009 100644
--- a/pygmt/src/grd2xyz.py
+++ b/pygmt/src/grd2xyz.py
@@ -159,7 +159,7 @@ def grd2xyz(grid, output_type="pandas", outfile=None, **kwargs):
elif outfile is None and output_type == "file":
raise GMTInvalidInput("Must specify 'outfile' for ASCII output.")
- if "o" in kwargs and output_type == "pandas":
+ if kwargs.get("o") is not None and output_type == "pandas":
raise GMTInvalidInput(
"If 'outcols' is specified, 'output_type' must be either 'numpy'"
"or 'file'."
diff --git a/pygmt/src/grdgradient.py b/pygmt/src/grdgradient.py
index 70200d9b..98ada1ea 100644
--- a/pygmt/src/grdgradient.py
+++ b/pygmt/src/grdgradient.py
@@ -164,7 +164,7 @@ def grdgradient(grid, **kwargs):
>>> new_grid = pygmt.grdgradient(grid=grid, azimuth=10)
"""
with GMTTempFile(suffix=".nc") as tmpfile:
- if "Q" in kwargs and "N" not in kwargs:
+ if kwargs.get("Q") is not None and kwargs.get("N") is None:
raise GMTInvalidInput("""Must specify normalize if tiles is specified.""")
if not args_in_kwargs(args=["A", "D", "E"], kwargs=kwargs):
raise GMTInvalidInput(
@@ -174,9 +174,8 @@ def grdgradient(grid, **kwargs):
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
- if "G" not in kwargs: # if outgrid is unset, output to tempfile
- kwargs.update({"G": tmpfile.name})
- outgrid = kwargs["G"]
+ if (outgrid := kwargs.get("G")) is None:
+ kwargs["G"] = outgrid = tmpfile.name # output to tmpfile
lib.call_module("grdgradient", build_arg_string(kwargs, infile=infile))
return load_dataarray(outgrid) if outgrid == tmpfile.name else None
diff --git a/pygmt/src/grdimage.py b/pygmt/src/grdimage.py
index 8cc3d737..de130e70 100644
--- a/pygmt/src/grdimage.py
+++ b/pygmt/src/grdimage.py
@@ -166,7 +166,7 @@ def grdimage(self, grid, **kwargs):
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with contextlib.ExitStack() as stack:
# shading using an xr.DataArray
- if "I" in kwargs and data_kind(kwargs["I"]) == "grid":
+ if kwargs.get("I") is not None and data_kind(kwargs["I"]) == "grid":
shading_context = lib.virtualfile_from_grid(kwargs["I"])
kwargs["I"] = stack.enter_context(shading_context)
diff --git a/pygmt/src/grdview.py b/pygmt/src/grdview.py
index 0fbd8bca..03718807 100644
--- a/pygmt/src/grdview.py
+++ b/pygmt/src/grdview.py
@@ -126,7 +126,8 @@ def grdview(self, grid, **kwargs):
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with contextlib.ExitStack() as stack:
- if "G" in kwargs: # deal with kwargs["G"] if drapegrid is xr.DataArray
+ if kwargs.get("G") is not None:
+ # deal with kwargs["G"] if drapegrid is xr.DataArray
drapegrid = kwargs["G"]
if data_kind(drapegrid) in ("file", "grid"):
if data_kind(drapegrid) == "grid":
diff --git a/pygmt/src/makecpt.py b/pygmt/src/makecpt.py
index b0a8895d..bbb167dc 100644
--- a/pygmt/src/makecpt.py
+++ b/pygmt/src/makecpt.py
@@ -147,11 +147,11 @@ def makecpt(**kwargs):
``categorical=True``.
"""
with Session() as lib:
- if "W" in kwargs and "Ww" in kwargs:
+ if kwargs.get("W") is not None and kwargs.get("Ww") is not None:
raise GMTInvalidInput("Set only categorical or cyclic to True, not both.")
- if "H" not in kwargs: # if no output is set
+ if kwargs.get("H") is None: # if no output is set
arg_str = build_arg_string(kwargs)
- elif "H" in kwargs: # if output is set
+ else: # if output is set
outfile, kwargs["H"] = kwargs.pop("H"), True
if not outfile or not isinstance(outfile, str):
raise GMTInvalidInput("'output' should be a proper file name.")
diff --git a/pygmt/src/plot.py b/pygmt/src/plot.py
index 82fd0187..a9a21fbb 100644
--- a/pygmt/src/plot.py
+++ b/pygmt/src/plot.py
@@ -218,15 +218,15 @@ def plot(self, data=None, x=None, y=None, size=None, direction=None, **kwargs):
kind = data_kind(data, x, y)
extra_arrays = []
- if "S" in kwargs and kwargs["S"][0] in "vV" and direction is not None:
+ if kwargs.get("S") is not None and kwargs["S"][0] in "vV" and direction is not None:
extra_arrays.extend(direction)
elif (
- "S" not in kwargs
+ kwargs.get("S") is None
and kind == "geojson"
and data.geom_type.isin(["Point", "MultiPoint"]).all()
): # checking if the geometry of a geoDataFrame is Point or MultiPoint
kwargs["S"] = "s0.2c"
- elif "S" not in kwargs and kind == "file" and data.endswith(".gmt"):
+ elif kwargs.get("S") is None and kind == "file" and data.endswith(".gmt"):
# checking that the data is a file path to set default style
try:
with open(which(data), mode="r", encoding="utf8") as file:
@@ -236,7 +236,7 @@ def plot(self, data=None, x=None, y=None, size=None, direction=None, **kwargs):
kwargs["S"] = "s0.2c"
except FileNotFoundError:
pass
- if "G" in kwargs and is_nonstr_iter(kwargs["G"]):
+ if kwargs.get("G") is not None and is_nonstr_iter(kwargs["G"]):
if kind != "vectors":
raise GMTInvalidInput(
"Can't use arrays for color if data is matrix or file."
@@ -251,7 +251,7 @@ def plot(self, data=None, x=None, y=None, size=None, direction=None, **kwargs):
extra_arrays.append(size)
for flag in ["I", "t"]:
- if flag in kwargs and is_nonstr_iter(kwargs[flag]):
+ if kwargs.get(flag) is not None and is_nonstr_iter(kwargs[flag]):
if kind != "vectors":
raise GMTInvalidInput(
f"Can't use arrays for {plot.aliases[flag]} if data is matrix or file."
diff --git a/pygmt/src/plot3d.py b/pygmt/src/plot3d.py
index 504daf6b..adecd87a 100644
--- a/pygmt/src/plot3d.py
+++ b/pygmt/src/plot3d.py
@@ -188,15 +188,15 @@ def plot3d(
kind = data_kind(data, x, y, z)
extra_arrays = []
- if "S" in kwargs and kwargs["S"][0] in "vV" and direction is not None:
+ if kwargs.get("S") is not None and kwargs["S"][0] in "vV" and direction is not None:
extra_arrays.extend(direction)
elif (
- "S" not in kwargs
+ kwargs.get("S") is None
and kind == "geojson"
and data.geom_type.isin(["Point", "MultiPoint"]).all()
): # checking if the geometry of a geoDataFrame is Point or MultiPoint
kwargs["S"] = "u0.2c"
- elif "S" not in kwargs and kind == "file" and data.endswith(".gmt"):
+ elif kwargs.get("S") is None and kind == "file" and data.endswith(".gmt"):
# checking that the data is a file path to set default style
try:
with open(which(data), mode="r", encoding="utf8") as file:
@@ -206,7 +206,7 @@ def plot3d(
kwargs["S"] = "u0.2c"
except FileNotFoundError:
pass
- if "G" in kwargs and is_nonstr_iter(kwargs["G"]):
+ if kwargs.get("G") is not None and is_nonstr_iter(kwargs["G"]):
if kind != "vectors":
raise GMTInvalidInput(
"Can't use arrays for color if data is matrix or file."
@@ -221,7 +221,7 @@ def plot3d(
extra_arrays.append(size)
for flag in ["I", "t"]:
- if flag in kwargs and is_nonstr_iter(kwargs[flag]):
+ if kwargs.get(flag) is not None and is_nonstr_iter(kwargs[flag]):
if kind != "vectors":
raise GMTInvalidInput(
f"Can't use arrays for {plot3d.aliases[flag]} if data is matrix or file."
diff --git a/pygmt/src/project.py b/pygmt/src/project.py
index 21a0d858..4715e7db 100644
--- a/pygmt/src/project.py
+++ b/pygmt/src/project.py
@@ -210,13 +210,13 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):
by ``outfile``)
"""
- if "C" not in kwargs:
+ if kwargs.get("C") is None:
raise GMTInvalidInput("The `center` parameter must be specified.")
- if "G" not in kwargs and data is None:
+ if kwargs.get("G") is None and data is None:
raise GMTInvalidInput(
"The `data` parameter must be specified unless `generate` is used."
)
- if "G" in kwargs and "F" in kwargs:
+ if kwargs.get("G") is not None and kwargs.get("F") is not None:
raise GMTInvalidInput(
"The `convention` parameter is not allowed with `generate`."
)
@@ -225,7 +225,7 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):
if outfile is None: # Output to tmpfile if outfile is not set
outfile = tmpfile.name
with Session() as lib:
- if "G" not in kwargs:
+ if kwargs.get("G") is None:
# Choose how data will be passed into the module
table_context = lib.virtualfile_from_data(
check_kind="vector", data=data, x=x, y=y, z=z, required_z=False
@@ -240,7 +240,7 @@ def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):
# if user did not set outfile, return pd.DataFrame
if outfile == tmpfile.name:
- if "G" in kwargs:
+ if kwargs.get("G") is not None:
column_names = list("rsp")
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
else:
diff --git a/pygmt/src/solar.py b/pygmt/src/solar.py
index 5b0c0a35..51c830d1 100644
--- a/pygmt/src/solar.py
+++ b/pygmt/src/solar.py
@@ -66,7 +66,7 @@ def solar(self, terminator="d", terminator_datetime=None, **kwargs):
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
- if "T" in kwargs:
+ if kwargs.get("T") is not None:
raise GMTInvalidInput(
"Use 'terminator' and 'terminator_datetime' instead of 'T'."
)
diff --git a/pygmt/src/text.py b/pygmt/src/text.py
index 2e81596c..d12ffcd9 100644
--- a/pygmt/src/text.py
+++ b/pygmt/src/text.py
@@ -215,7 +215,7 @@ def text_(
extra_arrays = []
# If an array of transparency is given, GMT will read it from
# the last numerical column per data record.
- if "t" in kwargs and is_nonstr_iter(kwargs["t"]):
+ if kwargs.get("t") is not None and is_nonstr_iter(kwargs["t"]):
extra_arrays.append(kwargs["t"])
kwargs["t"] = ""
diff --git a/pygmt/src/velo.py b/pygmt/src/velo.py
index 77d7115b..c62c46ff 100644
--- a/pygmt/src/velo.py
+++ b/pygmt/src/velo.py
@@ -238,8 +238,12 @@ def velo(self, data=None, **kwargs):
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
- if "S" not in kwargs or ("S" in kwargs and not isinstance(kwargs["S"], str)):
- raise GMTInvalidInput("Spec is a required argument and has to be a string.")
+ if kwargs.get("S") is None or (
+ kwargs.get("S") is not None and not isinstance(kwargs["S"], str)
+ ):
+ raise GMTInvalidInput(
+ "The parameter `spec` is required and has to be a string."
+ )
if isinstance(data, np.ndarray) and not pd.api.types.is_numeric_dtype(data):
raise GMTInvalidInput(
| Figure.grdimage does not respect shading=None
**Description of the problem**
Figure.grdimage() crashes when the option `shading=None` is passed explicitly. According to the docstring, `shading=None` is the default value, so this is unexpected. For the script I am using, I always need to pass something for shading (either nothing or a file), so this breaks everything for me. This appears to have been broken since version 0.4.
**Full code that generated the error**
This works
```python
fig = pygmt.Figure()
fig.grdimage(grid="@earth_relief_01d_g")
fig = pygmt.Figure()
fig.grdimage(grid="@earth_relief_01d_g", shading="@earth_relief_01d_g+d")
```
This doesn't
```python
fig = pygmt.Figure()
fig.grdimage(grid="@earth_relief_01d_g", shading=None)
```
**Full error message**
```
---------------------------------------------------------------------------
GMTInvalidInput Traceback (most recent call last)
Input In [10], in <cell line: 4>()
1 import pygmt
3 fig = pygmt.Figure()
----> 4 fig.grdimage(grid="@earth_relief_01d_g", shading=None)
File /opt/miniconda3/envs/testing/lib/python3.9/site-packages/pygmt/helpers/decorators.py:585, in use_alias.<locals>.alias_decorator.<locals>.new_module(*args, **kwargs)
580 msg = (
581 f"Short-form parameter ({short_param}) is not recommended. "
582 f"Use long-form parameter '{long_alias}' instead."
583 )
584 warnings.warn(msg, category=SyntaxWarning, stacklevel=2)
--> 585 return module_func(*args, **kwargs)
File /opt/miniconda3/envs/testing/lib/python3.9/site-packages/pygmt/helpers/decorators.py:725, in kwargs_to_strings.<locals>.converter.<locals>.new_module(*args, **kwargs)
723 kwargs[arg] = separators[fmt].join(f"{item}" for item in value)
724 # Execute the original function and return its output
--> 725 return module_func(*args, **kwargs)
File /opt/miniconda3/envs/testing/lib/python3.9/site-packages/pygmt/src/grdimage.py:169, in grdimage(self, grid, **kwargs)
166 file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
167 with contextlib.ExitStack() as stack:
168 # shading using an xr.DataArray
--> 169 if "I" in kwargs and data_kind(kwargs["I"]) == "grid":
170 shading_context = lib.virtualfile_from_grid(kwargs["I"])
171 kwargs["I"] = stack.enter_context(shading_context)
File /opt/miniconda3/envs/testing/lib/python3.9/site-packages/pygmt/helpers/utils.py:69, in data_kind(data, x, y, z, required_z)
19 """
20 Check what kind of data is provided to a module.
21
(...)
66 'grid'
67 """
68 if data is None and x is None and y is None:
---> 69 raise GMTInvalidInput("No input data provided.")
70 if data is not None and (x is not None or y is not None or z is not None):
71 raise GMTInvalidInput("Too much data. Use either data or x and y.")
GMTInvalidInput: No input data provided.
```
**System information**
macOS and Ubuntu.
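The traceback points at the membership test in `grdimage`: after alias translation, `shading=None` still produces an `"I"` entry in `kwargs`, so `"I" in kwargs` is true and the `None` value is handed to `data_kind()`, which raises "No input data provided." A minimal sketch of the difference, using a plain dict rather than PyGMT internals:
```python
# Minimal illustration of the failure mode (plain dict, not PyGMT code).
kwargs = {"I": None}  # what the alias translation produces for shading=None

# Old check: membership alone is True even though the value is None,
# so None gets forwarded to data_kind() and the input check raises.
print("I" in kwargs)                # True

# Fixed check (as in the patch above): also require a non-None value.
print(kwargs.get("I") is not None)  # False -> the shading branch is skipped
```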
| GenericMappingTools/pygmt | diff --git a/pygmt/tests/test_grd2xyz.py b/pygmt/tests/test_grd2xyz.py
index 97abb590..4cb172d3 100644
--- a/pygmt/tests/test_grd2xyz.py
+++ b/pygmt/tests/test_grd2xyz.py
@@ -45,7 +45,7 @@ def test_grd2xyz_format(grid):
np.testing.assert_allclose(orig_val, xyz_val)
xyz_array = grd2xyz(grid=grid, output_type="numpy")
assert isinstance(xyz_array, np.ndarray)
- xyz_df = grd2xyz(grid=grid, output_type="pandas")
+ xyz_df = grd2xyz(grid=grid, output_type="pandas", outcols=None)
assert isinstance(xyz_df, pd.DataFrame)
assert list(xyz_df.columns) == ["lon", "lat", "z"]
diff --git a/pygmt/tests/test_grdgradient.py b/pygmt/tests/test_grdgradient.py
index 4f908a22..df4810e2 100644
--- a/pygmt/tests/test_grdgradient.py
+++ b/pygmt/tests/test_grdgradient.py
@@ -57,8 +57,13 @@ def test_grdgradient_no_outgrid(grid, expected_grid):
"""
Test the azimuth and direction parameters for grdgradient with no set
outgrid.
+
+ This is a regression test for
+ https://github.com/GenericMappingTools/pygmt/issues/1807.
"""
- result = grdgradient(grid=grid, azimuth=10, region=[-53, -49, -20, -17])
+ result = grdgradient(
+ grid=grid, azimuth=10, region=[-53, -49, -20, -17], outgrid=None
+ )
# check information of the output grid
assert isinstance(result, xr.DataArray)
assert result.gmt.gtype == 1 # Geographic grid
diff --git a/pygmt/tests/test_grdimage.py b/pygmt/tests/test_grdimage.py
index b6ec545b..efa1e73e 100644
--- a/pygmt/tests/test_grdimage.py
+++ b/pygmt/tests/test_grdimage.py
@@ -92,6 +92,21 @@ def test_grdimage_file():
return fig
[email protected]_image_compare(filename="test_grdimage_slice.png")
[email protected]("shading", [None, False])
+def test_grdimage_default_no_shading(grid, shading):
+ """
+ Plot an image with no shading.
+
+ This is a regression test for
+ https://github.com/GenericMappingTools/pygmt/issues/1852
+ """
+ grid_ = grid.sel(lat=slice(-30, 30))
+ fig = Figure()
+ fig.grdimage(grid_, cmap="earth", projection="M6i", shading=shading)
+ return fig
+
+
@check_figures_equal()
@pytest.mark.parametrize(
"shading",
diff --git a/pygmt/tests/test_text.py b/pygmt/tests/test_text.py
index b0f5158e..c160b821 100644
--- a/pygmt/tests/test_text.py
+++ b/pygmt/tests/test_text.py
@@ -337,6 +337,20 @@ def test_text_varying_transparency():
return fig
[email protected]_image_compare(filename="test_text_input_single_filename.png")
[email protected]("transparency", [None, False, 0])
+def test_text_no_transparency(transparency):
+ """
+ Add text with no transparency set.
+
+ This is a regression test for
+ https://github.com/GenericMappingTools/pygmt/issues/1852.
+ """
+ fig = Figure()
+ fig.text(region=[10, 70, -5, 10], textfiles=POINTS_DATA, transparency=transparency)
+ return fig
+
+
@pytest.mark.mpl_image_compare
def test_text_nonstr_text():
"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 12
} | 0.6 | {
"env_vars": null,
"env_yml_path": [
"environment.yml"
],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": true,
"packages": "environment.yml",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-doctestplus",
"pytest-mpl"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs @ file:///home/conda/feedstock_root/build_artifacts/aiohappyeyeballs_1741775197943/work
aiohttp @ file:///home/conda/feedstock_root/build_artifacts/aiohttp_1742268596946/work
aiohttp-retry @ file:///home/conda/feedstock_root/build_artifacts/aiohttp-retry_1743371080905/work
aiosignal @ file:///home/conda/feedstock_root/build_artifacts/aiosignal_1734342155601/work
alabaster @ file:///home/conda/feedstock_root/build_artifacts/alabaster_1704848697227/work
amqp @ file:///home/conda/feedstock_root/build_artifacts/amqp_1733906301603/work
annotated-types @ file:///home/conda/feedstock_root/build_artifacts/annotated-types_1733247046149/work
antlr4-python3-runtime @ file:///home/conda/feedstock_root/build_artifacts/antlr-python-runtime-meta_1638309185939/work
anyio @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_anyio_1742243108/work
appdirs @ file:///home/conda/feedstock_root/build_artifacts/appdirs_1733753955715/work
argon2-cffi @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi_1733311059102/work
argon2-cffi-bindings @ file:///home/conda/feedstock_root/build_artifacts/argon2-cffi-bindings_1725356560642/work
arrow @ file:///home/conda/feedstock_root/build_artifacts/arrow_1733584251875/work
astroid @ file:///home/conda/feedstock_root/build_artifacts/astroid_1741614576512/work
asttokens @ file:///home/conda/feedstock_root/build_artifacts/asttokens_1733250440834/work
async-lru @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_async-lru_1742153708/work
async-timeout @ file:///home/conda/feedstock_root/build_artifacts/async-timeout_1733235340728/work
asyncssh @ file:///home/conda/feedstock_root/build_artifacts/asyncssh_1739897873808/work
atpublic @ file:///home/conda/feedstock_root/build_artifacts/atpublic_1737771474411/work
attrs @ file:///home/conda/feedstock_root/build_artifacts/attrs_1741918516150/work
babel @ file:///home/conda/feedstock_root/build_artifacts/babel_1738490167835/work
beautifulsoup4 @ file:///home/conda/feedstock_root/build_artifacts/beautifulsoup4_1738740337718/work
billiard @ file:///home/conda/feedstock_root/build_artifacts/billiard_1726941169531/work
black @ file:///home/conda/feedstock_root/build_artifacts/black-recipe_1742502760723/work
blackdoc @ file:///home/conda/feedstock_root/build_artifacts/blackdoc_1737385998318/work
bleach @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_bleach_1737382993/work
branca @ file:///home/conda/feedstock_root/build_artifacts/branca_1734433375112/work
Brotli @ file:///home/conda/feedstock_root/build_artifacts/brotli-split_1725267488082/work
build @ file:///croot/build_1692303725845/work
cached-property @ file:///home/conda/feedstock_root/build_artifacts/cached_property_1615209429212/work
celery @ file:///home/conda/feedstock_root/build_artifacts/celery_1703899296064/work
certifi @ file:///home/conda/feedstock_root/build_artifacts/certifi_1739515848642/work/certifi
cffi @ file:///home/conda/feedstock_root/build_artifacts/cffi_1725571112467/work
cftime @ file:///home/conda/feedstock_root/build_artifacts/cftime_1725400455427/work
charset-normalizer @ file:///home/conda/feedstock_root/build_artifacts/charset-normalizer_1735929714516/work
click @ file:///home/conda/feedstock_root/build_artifacts/click_1734858813237/work
click-didyoumean @ file:///home/conda/feedstock_root/build_artifacts/click-didyoumean_1734293070305/work
click-plugins @ file:///home/conda/feedstock_root/build_artifacts/click-plugins_1733731077999/work
click-repl @ file:///home/conda/feedstock_root/build_artifacts/click-repl_1694959444233/work
cligj @ file:///home/conda/feedstock_root/build_artifacts/cligj_1733749956636/work
colorama @ file:///home/conda/feedstock_root/build_artifacts/colorama_1733218098505/work
comm @ file:///home/conda/feedstock_root/build_artifacts/comm_1733502965406/work
configobj @ file:///home/conda/feedstock_root/build_artifacts/configobj_1734075408845/work
contourpy @ file:///home/conda/feedstock_root/build_artifacts/contourpy_1727293517607/work
coverage @ file:///home/conda/feedstock_root/build_artifacts/coverage_1743381224823/work
cryptography @ file:///home/conda/feedstock_root/build_artifacts/cryptography-split_1672672393437/work
cycler @ file:///home/conda/feedstock_root/build_artifacts/cycler_1733332471406/work
debugpy @ file:///home/conda/feedstock_root/build_artifacts/debugpy_1741148409996/work
decorator @ file:///home/conda/feedstock_root/build_artifacts/decorator_1740384970518/work
defusedxml @ file:///home/conda/feedstock_root/build_artifacts/defusedxml_1615232257335/work
dictdiffer @ file:///home/conda/feedstock_root/build_artifacts/dictdiffer_1734344338200/work
dill @ file:///home/conda/feedstock_root/build_artifacts/dill_1733249551891/work
diskcache @ file:///home/conda/feedstock_root/build_artifacts/diskcache_1734196270869/work
distro @ file:///home/conda/feedstock_root/build_artifacts/distro_1734729835256/work
docformatter @ file:///home/conda/feedstock_root/build_artifacts/docformatter_1734377400330/work
docutils @ file:///home/conda/feedstock_root/build_artifacts/docutils_1733217766141/work
dpath @ file:///home/conda/feedstock_root/build_artifacts/dpath_1718243458415/work
dulwich @ file:///home/conda/feedstock_root/build_artifacts/dulwich_1740965108287/work
dvc @ file:///home/conda/feedstock_root/build_artifacts/dvc_1701829840006/work
dvc-data @ file:///home/conda/feedstock_root/build_artifacts/dvc-data_1701860897492/work
dvc-http @ file:///home/conda/feedstock_root/build_artifacts/dvc-http_1734723780438/work
dvc-objects @ file:///home/conda/feedstock_root/build_artifacts/dvc-objects_1701884954877/work
dvc-render @ file:///home/conda/feedstock_root/build_artifacts/dvc-render_1734673264834/work
dvc-studio-client @ file:///home/conda/feedstock_root/build_artifacts/dvc-studio-client_1734664628961/work
dvc-task @ file:///home/conda/feedstock_root/build_artifacts/dvc-task_1734664522689/work
entrypoints @ file:///home/conda/feedstock_root/build_artifacts/entrypoints_1733327148154/work
exceptiongroup @ file:///home/conda/feedstock_root/build_artifacts/exceptiongroup_1733208806608/work
executing @ file:///home/conda/feedstock_root/build_artifacts/executing_1733569351617/work
fastjsonschema @ file:///home/conda/feedstock_root/build_artifacts/python-fastjsonschema_1733235979760/work/dist
filelock @ file:///home/conda/feedstock_root/build_artifacts/filelock_1741969488311/work
Fiona @ file:///home/conda/feedstock_root/build_artifacts/fiona_1653911972420/work
flake8 @ file:///home/conda/feedstock_root/build_artifacts/flake8_1739898391164/work
flatten-dict @ file:///home/conda/feedstock_root/build_artifacts/flatten-dict_1629457542349/work
flufl.lock @ file:///home/conda/feedstock_root/build_artifacts/flufl.lock_1661682505165/work
folium @ file:///home/conda/feedstock_root/build_artifacts/folium_1740766619747/work
fonttools @ file:///home/conda/feedstock_root/build_artifacts/fonttools_1738940303262/work
fqdn @ file:///home/conda/feedstock_root/build_artifacts/fqdn_1733327382592/work/dist
frozenlist @ file:///home/conda/feedstock_root/build_artifacts/frozenlist_1737645236190/work
fsspec @ file:///home/conda/feedstock_root/build_artifacts/fsspec_1743361113926/work
funcy @ file:///home/conda/feedstock_root/build_artifacts/funcy_1734381131891/work
future @ file:///home/conda/feedstock_root/build_artifacts/future_1738926421307/work
GDAL==3.5.0
geopandas @ file:///home/conda/feedstock_root/build_artifacts/geopandas_1714335488963/work
gitdb @ file:///home/conda/feedstock_root/build_artifacts/gitdb_1735887193964/work
GitPython @ file:///home/conda/feedstock_root/build_artifacts/gitpython_1735929639977/work
grandalf @ file:///home/conda/feedstock_root/build_artifacts/grandalf_1734664611384/work
gssapi @ file:///home/conda/feedstock_root/build_artifacts/python-gssapi_1666947090853/work
gto @ file:///home/conda/feedstock_root/build_artifacts/gto_1704857118902/work
h11 @ file:///home/conda/feedstock_root/build_artifacts/h11_1733327467879/work
h2 @ file:///home/conda/feedstock_root/build_artifacts/h2_1738578511449/work
hpack @ file:///home/conda/feedstock_root/build_artifacts/hpack_1737618293087/work
httpcore @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_httpcore_1731707562/work
httpx @ file:///home/conda/feedstock_root/build_artifacts/httpx_1733663348460/work
hydra-core @ file:///home/conda/feedstock_root/build_artifacts/hydra-core_1736934708722/work
hyperframe @ file:///home/conda/feedstock_root/build_artifacts/hyperframe_1737618333194/work
idna @ file:///home/conda/feedstock_root/build_artifacts/idna_1733211830134/work
imagesize @ file:///home/conda/feedstock_root/build_artifacts/imagesize_1656939531508/work
importlib_metadata @ file:///home/conda/feedstock_root/build_artifacts/importlib-metadata_1737420181517/work
importlib_resources @ file:///home/conda/feedstock_root/build_artifacts/importlib_resources_1736252299705/work
iniconfig @ file:///home/conda/feedstock_root/build_artifacts/iniconfig_1733223141826/work
ipykernel @ file:///home/conda/feedstock_root/build_artifacts/ipykernel_1719845459717/work
ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1701831663892/work
ipywidgets @ file:///home/conda/feedstock_root/build_artifacts/ipywidgets_1733493556527/work
isoduration @ file:///home/conda/feedstock_root/build_artifacts/isoduration_1733493628631/work/dist
isort @ file:///home/conda/feedstock_root/build_artifacts/isort_1740643408806/work
iterative-telemetry @ file:///home/conda/feedstock_root/build_artifacts/iterative-telemetry_1739252628161/work
jedi @ file:///home/conda/feedstock_root/build_artifacts/jedi_1733300866624/work
Jinja2 @ file:///home/conda/feedstock_root/build_artifacts/jinja2_1741263328855/work
joblib @ file:///home/conda/feedstock_root/build_artifacts/joblib_1733736026804/work
json5 @ file:///home/conda/feedstock_root/build_artifacts/json5_1733272076743/work
jsonpointer @ file:///home/conda/feedstock_root/build_artifacts/jsonpointer_1725302957584/work
jsonschema @ file:///home/conda/feedstock_root/build_artifacts/jsonschema_1733472696581/work
jsonschema-specifications @ file:///tmp/tmpk0f344m9/src
jupyter @ file:///home/conda/feedstock_root/build_artifacts/jupyter_1733818543322/work
jupyter-console @ file:///home/conda/feedstock_root/build_artifacts/jupyter_console_1733817997778/work
jupyter-events @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_jupyter_events_1738765986/work
jupyter-lsp @ file:///home/conda/feedstock_root/build_artifacts/jupyter-lsp-meta_1733492907176/work/jupyter-lsp
jupyter_client @ file:///home/conda/feedstock_root/build_artifacts/jupyter_client_1733440914442/work
jupyter_core @ file:///home/conda/feedstock_root/build_artifacts/jupyter_core_1727163409502/work
jupyter_server @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_1734702637701/work
jupyter_server_terminals @ file:///home/conda/feedstock_root/build_artifacts/jupyter_server_terminals_1733427956852/work
jupyterlab @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_1741964057182/work
jupyterlab_pygments @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_pygments_1733328101776/work
jupyterlab_server @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_server_1733599573484/work
jupyterlab_widgets @ file:///home/conda/feedstock_root/build_artifacts/jupyterlab_widgets_1733428046021/work
kiwisolver @ file:///home/conda/feedstock_root/build_artifacts/kiwisolver_1725459266648/work
kombu @ file:///home/conda/feedstock_root/build_artifacts/kombu_1699281730408/work
mapclassify @ file:///home/conda/feedstock_root/build_artifacts/mapclassify_1733731066416/work
markdown-it-py @ file:///home/conda/feedstock_root/build_artifacts/markdown-it-py_1733250460757/work
MarkupSafe @ file:///home/conda/feedstock_root/build_artifacts/markupsafe_1733219680183/work
matplotlib==3.9.1
matplotlib-inline @ file:///home/conda/feedstock_root/build_artifacts/matplotlib-inline_1733416936468/work
mccabe @ file:///home/conda/feedstock_root/build_artifacts/mccabe_1733216466933/work
mdit-py-plugins @ file:///home/conda/feedstock_root/build_artifacts/mdit-py-plugins_1733854715505/work
mdurl @ file:///home/conda/feedstock_root/build_artifacts/mdurl_1733255585584/work
mistune @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_mistune_1742402716/work
more-itertools @ file:///home/conda/feedstock_root/build_artifacts/more-itertools_1736883817510/work
multidict @ file:///home/conda/feedstock_root/build_artifacts/multidict_1742308123521/work
munch @ file:///home/conda/feedstock_root/build_artifacts/munch_1734441240299/work
munkres==1.1.4
mypy_extensions @ file:///home/conda/feedstock_root/build_artifacts/mypy_extensions_1733230897951/work
myst-parser @ file:///home/conda/feedstock_root/build_artifacts/myst-parser_1714413780344/work
nanotime==0.5.2
nbclient @ file:///home/conda/feedstock_root/build_artifacts/nbclient_1734628800805/work
nbconvert @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_nbconvert-core_1738067871/work
nbformat @ file:///home/conda/feedstock_root/build_artifacts/nbformat_1733402752141/work
nest_asyncio @ file:///home/conda/feedstock_root/build_artifacts/nest-asyncio_1733325553580/work
netCDF4 @ file:///home/conda/feedstock_root/build_artifacts/netcdf4_1656505566883/work
networkx @ file:///home/conda/feedstock_root/build_artifacts/networkx_1698504735452/work
notebook @ file:///home/conda/feedstock_root/build_artifacts/notebook_1741968175534/work
notebook_shim @ file:///home/conda/feedstock_root/build_artifacts/notebook-shim_1733408315203/work
numpy @ file:///home/conda/feedstock_root/build_artifacts/numpy_1707225342954/work/dist/numpy-1.26.4-cp39-cp39-linux_x86_64.whl#sha256=c799942b5898f6e6c60264d1663a6469a475290e758c654aeeb78e2596463abd
omegaconf @ file:///home/conda/feedstock_root/build_artifacts/omegaconf_1670575376789/work
orjson @ file:///home/conda/feedstock_root/build_artifacts/orjson_1742909849040/work
overrides @ file:///home/conda/feedstock_root/build_artifacts/overrides_1734587627321/work
packaging @ file:///home/conda/feedstock_root/build_artifacts/packaging_1733203243479/work
pandas @ file:///home/conda/feedstock_root/build_artifacts/pandas_1736810577256/work
pandocfilters @ file:///home/conda/feedstock_root/build_artifacts/pandocfilters_1631603243851/work
parso @ file:///home/conda/feedstock_root/build_artifacts/parso_1733271261340/work
pathlib2 @ file:///home/conda/feedstock_root/build_artifacts/pathlib2_1725350185303/work
pathspec @ file:///home/conda/feedstock_root/build_artifacts/pathspec_1733233363808/work
pexpect @ file:///home/conda/feedstock_root/build_artifacts/pexpect_1733301927746/work
pickleshare @ file:///home/conda/feedstock_root/build_artifacts/pickleshare_1733327343728/work
Pillow @ file:///home/conda/feedstock_root/build_artifacts/pillow_1666920578599/work
pkgutil_resolve_name @ file:///home/conda/feedstock_root/build_artifacts/pkgutil-resolve-name_1733344503739/work
platformdirs @ file:///home/conda/feedstock_root/build_artifacts/platformdirs_1696272223550/work
pluggy @ file:///home/conda/feedstock_root/build_artifacts/pluggy_1733222765875/work
ply @ file:///home/conda/feedstock_root/build_artifacts/ply_1733239724146/work
prometheus_client @ file:///home/conda/feedstock_root/build_artifacts/prometheus_client_1733327310477/work
prompt_toolkit @ file:///home/conda/feedstock_root/build_artifacts/prompt-toolkit_1737453357274/work
propcache @ file:///home/conda/feedstock_root/build_artifacts/propcache_1737635528546/work
psutil @ file:///home/conda/feedstock_root/build_artifacts/psutil_1740663125313/work
ptyprocess @ file:///home/conda/feedstock_root/build_artifacts/ptyprocess_1733302279685/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl#sha256=92c32ff62b5fd8cf325bec5ab90d7be3d2a8ca8c8a3813ff487a8d2002630d1f
pure_eval @ file:///home/conda/feedstock_root/build_artifacts/pure_eval_1733569405015/work
pycodestyle @ file:///home/conda/feedstock_root/build_artifacts/pycodestyle_1733216196861/work
pycparser @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pycparser_1733195786/work
pydantic @ file:///home/conda/feedstock_root/build_artifacts/pydantic_1743418918215/work
pydantic_core @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pydantic-core_1743201081/work
pydot @ file:///home/conda/feedstock_root/build_artifacts/pydot_1737244087476/work
pyflakes @ file:///home/conda/feedstock_root/build_artifacts/pyflakes_1733216066937/work
pygit2 @ file:///home/conda/feedstock_root/build_artifacts/pygit2_1692684244600/work
Pygments @ file:///home/conda/feedstock_root/build_artifacts/pygments_1736243443484/work
-e git+https://github.com/GenericMappingTools/pygmt.git@61781e433986d02825c7d553bd7b9bced9de3c76#egg=pygmt
pygtrie @ file:///home/conda/feedstock_root/build_artifacts/pygtrie_1734664549100/work
pylint @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_pylint_1741550910/work
pyOpenSSL @ file:///home/conda/feedstock_root/build_artifacts/pyopenssl_1685514481738/work
pyparsing @ file:///home/conda/feedstock_root/build_artifacts/pyparsing_1743089729650/work
pyproj @ file:///home/conda/feedstock_root/build_artifacts/pyproj_1662785946154/work
pyproject_hooks @ file:///home/conda/feedstock_root/build_artifacts/pyproject_hooks_1733710025763/work
PyQt5==5.15.7
PyQt5-sip==12.11.0
PySocks @ file:///home/conda/feedstock_root/build_artifacts/pysocks_1733217236728/work
pytest @ file:///home/conda/feedstock_root/build_artifacts/pytest_1740946542080/work
pytest-cov @ file:///home/conda/feedstock_root/build_artifacts/pytest-cov_1733223023082/work
pytest-doctestplus @ file:///home/conda/feedstock_root/build_artifacts/pytest-doctestplus_1737819197221/work
pytest-mpl @ file:///home/conda/feedstock_root/build_artifacts/pytest-mpl_1734116536345/work
python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/python-dateutil_1733215673016/work
python-json-logger @ file:///home/conda/feedstock_root/build_artifacts/python-json-logger_1677079630776/work
pytz @ file:///home/conda/feedstock_root/build_artifacts/pytz_1706886791323/work
PyYAML @ file:///home/conda/feedstock_root/build_artifacts/pyyaml_1737454647378/work
pyzmq @ file:///home/conda/feedstock_root/build_artifacts/pyzmq_1725430390447/work
referencing @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_referencing_1737836872/work
requests @ file:///home/conda/feedstock_root/build_artifacts/requests_1733217035951/work
rfc3339_validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3339-validator_1733599910982/work
rfc3986-validator @ file:///home/conda/feedstock_root/build_artifacts/rfc3986-validator_1598024191506/work
rich @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rich_1743371105/work/dist
rpds-py @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_rpds-py_1743037693/work
rtree @ file:///home/conda/feedstock_root/build_artifacts/rtree_1741378561624/work
ruamel.yaml @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml_1736248037007/work
ruamel.yaml.clib @ file:///home/conda/feedstock_root/build_artifacts/ruamel.yaml.clib_1728724456970/work
scikit-learn @ file:///home/conda/feedstock_root/build_artifacts/scikit-learn_1736496755362/work/dist/scikit_learn-1.6.1-cp39-cp39-linux_x86_64.whl#sha256=e8f978e37bb47e04e1337a63f75697b723d6d25f58e477734555faed033884ba
scipy @ file:///home/conda/feedstock_root/build_artifacts/scipy-split_1716470218293/work/dist/scipy-1.13.1-cp39-cp39-linux_x86_64.whl#sha256=e6696cb8683d94467891b7648e068a3970f6bc0a1b3c1aa7f9bc89458eafd2f0
scmrepo @ file:///home/conda/feedstock_root/build_artifacts/scmrepo_1700664740976/work
semver @ file:///home/conda/feedstock_root/build_artifacts/semver_1737841553927/work
Send2Trash @ file:///home/conda/feedstock_root/build_artifacts/send2trash_1733322040660/work
Shapely @ file:///home/conda/feedstock_root/build_artifacts/shapely_1654372646138/work
shellingham @ file:///home/conda/feedstock_root/build_artifacts/shellingham_1733300899265/work
shortuuid @ file:///home/conda/feedstock_root/build_artifacts/shortuuid_1734272317000/work
shtab @ file:///home/conda/feedstock_root/build_artifacts/shtab_1734664526617/work
sip @ file:///home/conda/feedstock_root/build_artifacts/sip_1697300422453/work
six @ file:///home/conda/feedstock_root/build_artifacts/six_1733380938961/work
smmap @ file:///home/conda/feedstock_root/build_artifacts/smmap_1739781697784/work
sniffio @ file:///home/conda/feedstock_root/build_artifacts/sniffio_1733244044561/work
snowballstemmer @ file:///home/conda/feedstock_root/build_artifacts/snowballstemmer_1637143057757/work
soupsieve @ file:///home/conda/feedstock_root/build_artifacts/soupsieve_1693929250441/work
Sphinx @ file:///home/conda/feedstock_root/build_artifacts/sphinx_1721487534232/work
sphinx-copybutton @ file:///home/conda/feedstock_root/build_artifacts/sphinx-copybutton_1734572975006/work
sphinx-gallery @ file:///home/conda/feedstock_root/build_artifacts/sphinx-gallery_1739451496361/work
sphinx_rtd_theme @ file:///home/conda/feedstock_root/build_artifacts/sphinx_rtd_theme_1730015256242/work
sphinxcontrib-applehelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-applehelp_1733754101641/work
sphinxcontrib-devhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-devhelp_1733754113405/work
sphinxcontrib-htmlhelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-htmlhelp_1733754280555/work
sphinxcontrib-jquery @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jquery_1734344508263/work
sphinxcontrib-jsmath @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-jsmath_1733753744933/work
sphinxcontrib-qthelp @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-qthelp_1733753408017/work
sphinxcontrib-serializinghtml @ file:///home/conda/feedstock_root/build_artifacts/sphinxcontrib-serializinghtml_1733750479108/work
sqltrie @ file:///home/conda/feedstock_root/build_artifacts/sqltrie_1739984874333/work
stack_data @ file:///home/conda/feedstock_root/build_artifacts/stack_data_1733569443808/work
tabulate @ file:///home/conda/feedstock_root/build_artifacts/tabulate_1733589744265/work
terminado @ file:///home/conda/feedstock_root/build_artifacts/terminado_1710262609923/work
threadpoolctl @ file:///home/conda/feedstock_root/build_artifacts/threadpoolctl_1741878222898/work
tinycss2 @ file:///home/conda/feedstock_root/build_artifacts/tinycss2_1729802851396/work
toml @ file:///home/conda/feedstock_root/build_artifacts/toml_1734091811753/work
tomli @ file:///home/conda/feedstock_root/build_artifacts/tomli_1733256695513/work
tomlkit @ file:///home/conda/feedstock_root/build_artifacts/tomlkit_1733230743009/work
tornado @ file:///home/conda/feedstock_root/build_artifacts/tornado_1732615921868/work
tqdm @ file:///home/conda/feedstock_root/build_artifacts/tqdm_1735661334605/work
traitlets @ file:///home/conda/feedstock_root/build_artifacts/traitlets_1733367359838/work
typer==0.15.2
typer-slim==0.15.2
types-python-dateutil @ file:///home/conda/feedstock_root/build_artifacts/types-python-dateutil_1733612335562/work
typing-inspection @ file:///home/conda/feedstock_root/build_artifacts/typing-inspection_1741438046699/work
typing_extensions @ file:///home/conda/feedstock_root/build_artifacts/bld/rattler-build_typing_extensions_1743201626/work
typing_utils @ file:///home/conda/feedstock_root/build_artifacts/typing_utils_1733331286120/work
tzdata @ file:///home/conda/feedstock_root/build_artifacts/python-tzdata_1742745135198/work
unicodedata2 @ file:///home/conda/feedstock_root/build_artifacts/unicodedata2_1736692503055/work
untokenize @ file:///home/conda/feedstock_root/build_artifacts/untokenize_1734420909700/work
uri-template @ file:///home/conda/feedstock_root/build_artifacts/uri-template_1733323593477/work/dist
urllib3 @ file:///home/conda/feedstock_root/build_artifacts/urllib3_1734859416348/work
vine @ file:///home/conda/feedstock_root/build_artifacts/vine_1733906372582/work
voluptuous @ file:///home/conda/feedstock_root/build_artifacts/voluptuous_1734219447985/work
wcwidth @ file:///home/conda/feedstock_root/build_artifacts/wcwidth_1733231326287/work
webcolors @ file:///home/conda/feedstock_root/build_artifacts/webcolors_1733359735138/work
webencodings @ file:///home/conda/feedstock_root/build_artifacts/webencodings_1733236011802/work
websocket-client @ file:///home/conda/feedstock_root/build_artifacts/websocket-client_1733157342724/work
widgetsnbextension @ file:///home/conda/feedstock_root/build_artifacts/widgetsnbextension_1733128559935/work
xarray @ file:///home/conda/feedstock_root/build_artifacts/xarray_1722348170975/work
xyzservices @ file:///home/conda/feedstock_root/build_artifacts/xyzservices_1737234886776/work
yarl @ file:///home/conda/feedstock_root/build_artifacts/yarl_1737575777699/work
zc.lockfile @ file:///home/conda/feedstock_root/build_artifacts/zc.lockfile_1732886357525/work
zipp @ file:///home/conda/feedstock_root/build_artifacts/zipp_1732827521216/work
zstandard==0.23.0
| name: pygmt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- aiohappyeyeballs=2.6.1=pyhd8ed1ab_0
- aiohttp=3.11.14=py39h9399b63_0
- aiohttp-retry=2.9.1=pyhd8ed1ab_0
- aiosignal=1.3.2=pyhd8ed1ab_0
- alabaster=0.7.16=pyhd8ed1ab_0
- alsa-lib=1.2.8=h166bdaf_0
- amqp=5.2.0=pyhd8ed1ab_2
- annotated-types=0.7.0=pyhd8ed1ab_1
- antlr-python-runtime=4.9.3=pyhd8ed1ab_1
- anyio=4.9.0=pyh29332c3_0
- appdirs=1.4.4=pyhd8ed1ab_1
- argon2-cffi=23.1.0=pyhd8ed1ab_1
- argon2-cffi-bindings=21.2.0=py39h8cd3c5a_5
- arrow=1.3.0=pyhd8ed1ab_1
- astroid=3.3.9=py39hf3d152e_0
- asttokens=3.0.0=pyhd8ed1ab_1
- async-lru=2.0.5=pyh29332c3_0
- async-timeout=5.0.1=pyhd8ed1ab_1
- asyncssh=2.20.0=pyhd8ed1ab_0
- atk-1.0=2.38.0=hd4edc92_1
- atpublic=5.1=pyhd8ed1ab_0
- attr=2.5.1=h166bdaf_1
- attrs=25.3.0=pyh71513ae_0
- babel=2.17.0=pyhd8ed1ab_0
- backports.zoneinfo=0.2.1=py39hf3d152e_9
- beautifulsoup4=4.13.3=pyha770c72_0
- billiard=4.2.1=py39h8cd3c5a_0
- black=25.1.0=pyha5154f8_0
- blackdoc=0.3.9=pyhd8ed1ab_1
- bleach=6.2.0=pyh29332c3_4
- bleach-with-css=6.2.0=h82add2a_4
- blosc=1.21.5=hc2324a3_1
- boost-cpp=1.78.0=h5adbc97_2
- branca=0.8.1=pyhd8ed1ab_0
- brotli=1.1.0=hb9d3cd8_2
- brotli-bin=1.1.0=hb9d3cd8_2
- brotli-python=1.1.0=py39hf88036b_2
- build=0.10.0=py39h06a4308_0
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.4=hb9d3cd8_0
- ca-certificates=2025.2.25=h06a4308_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cairo=1.16.0=ha61ee94_1014
- celery=5.3.5=pyhd8ed1ab_0
- certifi=2025.1.31=pyhd8ed1ab_0
- cffi=1.17.1=py39h15c3d72_0
- cfitsio=4.1.0=hd9d235c_0
- cftime=1.6.4=py39hf3d9206_1
- charset-normalizer=3.4.1=pyhd8ed1ab_0
- click=8.1.8=pyh707e725_0
- click-didyoumean=0.3.1=pyhd8ed1ab_1
- click-plugins=1.1.1=pyhd8ed1ab_1
- click-repl=0.3.0=pyhd8ed1ab_0
- cligj=0.7.2=pyhd8ed1ab_2
- colorama=0.4.6=pyhd8ed1ab_1
- comm=0.2.2=pyhd8ed1ab_1
- configobj=5.0.9=pyhd8ed1ab_1
- contourpy=1.3.0=py39h74842e3_2
- coverage=7.8.0=py39h9399b63_0
- cryptography=39.0.0=py39hd598818_0
- curl=7.86.0=h7bff187_1
- cycler=0.12.1=pyhd8ed1ab_1
- dbus=1.13.6=h5008d03_3
- dcw-gmt=2.2.0=ha770c72_0
- debugpy=1.8.13=py39hf88036b_0
- decorator=5.2.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- dictdiffer=0.9.0=pyhd8ed1ab_1
- dill=0.3.9=pyhd8ed1ab_1
- diskcache=5.6.3=pyhd8ed1ab_1
- distro=1.9.0=pyhd8ed1ab_1
- docformatter=1.7.5=pyhd8ed1ab_1
- docutils=0.21.2=pyhd8ed1ab_1
- dpath=2.2.0=pyha770c72_0
- dulwich=0.22.8=py39he612d8f_0
- dvc=3.33.0=pyhd8ed1ab_0
- dvc-data=2.22.4=pyhd8ed1ab_0
- dvc-http=2.32.0=pyhd8ed1ab_1
- dvc-objects=1.4.9=pyhd8ed1ab_0
- dvc-render=1.0.2=pyhd8ed1ab_1
- dvc-studio-client=0.21.0=pyhd8ed1ab_1
- dvc-task=0.40.2=pyhd8ed1ab_1
- entrypoints=0.4=pyhd8ed1ab_1
- exceptiongroup=1.2.2=pyhd8ed1ab_1
- executing=2.1.0=pyhd8ed1ab_1
- expat=2.6.4=h5888daf_0
- fftw=3.3.10=nompi_hf1063bd_110
- filelock=3.18.0=pyhd8ed1ab_0
- fiona=1.8.21=py39h54775ec_2
- flake8=7.1.2=pyhd8ed1ab_0
- flatten-dict=0.4.2=pyhd8ed1ab_1
- flufl.lock=7.1=pyhd8ed1ab_0
- folium=0.19.5=pyhd8ed1ab_0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fonttools=4.56.0=py39h9399b63_0
- fqdn=1.5.1=pyhd8ed1ab_1
- freetype=2.12.1=h267a509_2
- freexl=1.0.6=h166bdaf_1
- fribidi=1.0.10=h36c2ea0_0
- frozenlist=1.5.0=py39h9399b63_1
- fsspec=2025.3.1=pyhd8ed1ab_0
- funcy=2.0=pyhd8ed1ab_1
- future=1.0.0=pyhd8ed1ab_2
- gdal=3.5.0=py39hc691d54_4
- gdk-pixbuf=2.42.10=h5eee18b_1
- geopandas=0.14.4=pyhd8ed1ab_0
- geopandas-base=0.14.4=pyha770c72_0
- geos=3.10.3=h27087fc_0
- geotiff=1.7.1=h4fc65e6_3
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- ghostscript=10.04.0=h5888daf_0
- giflib=5.2.2=hd590300_0
- gitdb=4.0.12=pyhd8ed1ab_0
- gitpython=3.1.44=pyhff2d567_0
- glib=2.78.4=hfc55251_0
- glib-tools=2.78.4=hfc55251_0
- gmt=6.3.0=he424f55_6
- grandalf=0.7=pyhd8ed1ab_1
- graphite2=1.3.13=h59595ed_1003
- graphviz=8.0.3=h2e5815a_0
- gshhg-gmt=2.3.7=ha770c72_1003
- gst-plugins-base=1.21.3=h4243ec0_1
- gstreamer=1.21.3=h25f0c4b_1
- gstreamer-orc=0.4.41=h17648ed_0
- gtk2=2.24.33=h90689f9_2
- gto=1.6.2=pyhd8ed1ab_0
- gts=0.7.6=h977cf35_4
- h11=0.14.0=pyhd8ed1ab_1
- h2=4.2.0=pyhd8ed1ab_0
- harfbuzz=6.0.0=h8e241bc_0
- hdf4=4.2.15=h9772cbc_5
- hdf5=1.12.1=nompi_h2386368_104
- hpack=4.1.0=pyhd8ed1ab_0
- httpcore=1.0.7=pyh29332c3_1
- httpx=0.28.1=pyhd8ed1ab_0
- hydra-core=1.3.2=pyhd8ed1ab_1
- hyperframe=6.1.0=pyhd8ed1ab_0
- icu=70.1=h27087fc_0
- idna=3.10=pyhd8ed1ab_1
- imagesize=1.4.1=pyhd8ed1ab_0
- importlib-metadata=8.6.1=pyha770c72_0
- importlib-resources=6.5.2=pyhd8ed1ab_0
- importlib_resources=6.5.2=pyhd8ed1ab_0
- iniconfig=2.0.0=pyhd8ed1ab_1
- ipykernel=6.29.5=pyh3099207_0
- ipython=8.18.1=pyh707e725_3
- ipywidgets=8.1.5=pyhd8ed1ab_1
- isoduration=20.11.0=pyhd8ed1ab_1
- isort=6.0.1=pyhd8ed1ab_0
- iterative-telemetry=0.0.10=pyhd8ed1ab_0
- jack=1.9.22=h11f4161_0
- jedi=0.19.2=pyhd8ed1ab_1
- jinja2=3.1.6=pyhd8ed1ab_0
- joblib=1.4.2=pyhd8ed1ab_1
- jpeg=9e=h0b41bf4_3
- json-c=0.16=hc379101_0
- json5=0.10.0=pyhd8ed1ab_1
- jsonpointer=3.0.0=py39hf3d152e_1
- jsonschema=4.23.0=pyhd8ed1ab_1
- jsonschema-specifications=2024.10.1=pyhd8ed1ab_1
- jsonschema-with-format-nongpl=4.23.0=hd8ed1ab_1
- jupyter=1.1.1=pyhd8ed1ab_1
- jupyter-lsp=2.2.5=pyhd8ed1ab_1
- jupyter_client=8.6.3=pyhd8ed1ab_1
- jupyter_console=6.6.3=pyhd8ed1ab_1
- jupyter_core=5.7.2=pyh31011fe_1
- jupyter_events=0.12.0=pyh29332c3_0
- jupyter_server=2.15.0=pyhd8ed1ab_0
- jupyter_server_terminals=0.5.3=pyhd8ed1ab_1
- jupyterlab=4.3.6=pyhd8ed1ab_0
- jupyterlab_pygments=0.3.0=pyhd8ed1ab_2
- jupyterlab_server=2.27.3=pyhd8ed1ab_1
- jupyterlab_widgets=3.0.13=pyhd8ed1ab_1
- kealib=1.4.15=hfe1a663_0
- keyutils=1.6.1=h166bdaf_0
- kiwisolver=1.4.7=py39h74842e3_0
- kombu=5.3.3=py39hf3d152e_0
- krb5=1.19.3=h3790be6_0
- lame=3.100=h166bdaf_1003
- lcms2=2.14=h6ed2654_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h27087fc_0
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libblas=3.9.0=31_h59b9bed_openblas
- libbrotlicommon=1.1.0=hb9d3cd8_2
- libbrotlidec=1.1.0=hb9d3cd8_2
- libbrotlienc=1.1.0=hb9d3cd8_2
- libcap=2.66=ha37c62d_0
- libcblas=3.9.0=31_he106b2a_openblas
- libclang=15.0.7=default_h127d8a8_5
- libclang13=15.0.7=default_h5d6823c_5
- libcups=2.3.3=h3e49a29_2
- libcurl=7.86.0=h7bff187_1
- libdap4=3.20.6=hd7c4107_2
- libdb=6.2.32=h9c3ff4c_0
- libdeflate=1.14=h166bdaf_0
- libedit=3.1.20191231=he28a2e2_2
- libev=4.33=hd590300_2
- libevent=2.1.10=h9b69904_4
- libexpat=2.6.4=h5888daf_0
- libffi=3.4.4=h6a678d5_1
- libflac=1.4.3=h59595ed_0
- libgcc=14.2.0=h767d61c_2
- libgcc-ng=14.2.0=h69a702a_2
- libgcrypt=1.11.0=ha770c72_2
- libgcrypt-devel=1.11.0=hb9d3cd8_2
- libgcrypt-lib=1.11.0=hb9d3cd8_2
- libgcrypt-tools=1.11.0=hb9d3cd8_2
- libgd=2.3.3=h695aa2c_1
- libgdal=3.5.0=hc0ebe42_4
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libgit2=1.6.4=ha637b67_0
- libglib=2.78.4=h783c2da_0
- libgomp=14.2.0=h767d61c_2
- libgpg-error=1.51=hbd13f7d_1
- libiconv=1.18=h4ce23a2_1
- libkml=1.3.0=h01aab08_1016
- liblapack=3.9.0=31_h7ac8fdf_openblas
- libllvm15=15.0.7=hadd5161_1
- libltdl=2.4.3a=h5888daf_0
- libnetcdf=4.8.1=nompi_h329d8a1_102
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.1=hd590300_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopus=1.3.1=h7f98852_1
- libpng=1.6.43=h2797004_0
- libpq=14.5=h72a31a5_3
- librsvg=2.54.4=h7abd40a_0
- librttopo=1.1.0=h40fdbc5_10
- libsndfile=1.2.2=hc60ed4a_1
- libsodium=1.0.18=h36c2ea0_1
- libspatialindex=2.1.0=he57a185_0
- libspatialite=5.0.1=hc16130b_17
- libsqlite=3.46.0=hde9e2c9_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-ng=14.2.0=h4852527_2
- libsystemd0=252=h2a991cd_0
- libtiff=4.4.0=h82bc61c_5
- libtool=2.5.4=h5888daf_0
- libudev1=253=h0b41bf4_0
- libuuid=2.38.1=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libwebp-base=1.5.0=h851e524_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.5.0=h79f4944_1
- libxml2=2.10.3=hca2bb57_4
- libzip=1.9.2=hc869a4a_1
- libzlib=1.2.13=h4ab18f5_6
- lz4-c=1.9.4=hcb278e6_0
- make=4.4.1=hb9d3cd8_2
- mapclassify=2.8.1=pyhd8ed1ab_1
- markdown-it-py=3.0.0=pyhd8ed1ab_1
- markupsafe=3.0.2=py39h9399b63_1
- matplotlib=3.9.1=py39hf3d152e_1
- matplotlib-base=3.9.1=py39h0565ad7_2
- matplotlib-inline=0.1.7=pyhd8ed1ab_1
- mccabe=0.7.0=pyhd8ed1ab_1
- mdit-py-plugins=0.4.2=pyhd8ed1ab_1
- mdurl=0.1.2=pyhd8ed1ab_1
- mistune=3.1.3=pyh29332c3_0
- more-itertools=10.6.0=pyhd8ed1ab_0
- mpg123=1.32.9=hc50e24c_0
- multidict=6.2.0=py39h9399b63_0
- munch=4.0.0=pyhd8ed1ab_1
- munkres=1.1.4=pyh9f0ad1d_0
- mypy_extensions=1.0.0=pyha770c72_1
- mysql-common=8.0.32=h14678bc_0
- mysql-libs=8.0.32=h54cf53e_0
- myst-parser=3.0.1=pyhd8ed1ab_0
- nanotime=0.5.2=py_0
- nbclient=0.10.2=pyhd8ed1ab_0
- nbconvert-core=7.16.6=pyh29332c3_0
- nbformat=5.10.4=pyhd8ed1ab_1
- ncurses=6.4=h6a678d5_0
- nest-asyncio=1.6.0=pyhd8ed1ab_1
- netcdf4=1.6.0=nompi_py39hf5a3a3f_100
- networkx=3.2.1=pyhd8ed1ab_0
- notebook=7.3.3=pyhd8ed1ab_0
- notebook-shim=0.2.4=pyhd8ed1ab_1
- nspr=4.36=h5888daf_0
- nss=3.100=hca3bf56_0
- numpy=1.26.4=py39h474f0d3_0
- omegaconf=2.3.0=pyhd8ed1ab_0
- openjpeg=2.5.0=h7d73246_1
- openssl=1.1.1w=hd590300_0
- orjson=3.10.16=py39he612d8f_0
- overrides=7.7.0=pyhd8ed1ab_1
- packaging=24.2=pyhd8ed1ab_2
- pandas=2.2.3=py39h3b40f6f_2
- pandocfilters=1.5.0=pyhd8ed1ab_0
- pango=1.50.14=hd33c08f_0
- parso=0.8.4=pyhd8ed1ab_1
- pathlib2=2.3.7.post1=py39hf3d152e_4
- pathspec=0.12.1=pyhd8ed1ab_1
- pcre=8.45=h9c3ff4c_0
- pcre2=10.42=hcad00b1_0
- pexpect=4.9.0=pyhd8ed1ab_1
- pickleshare=0.7.5=pyhd8ed1ab_1004
- pillow=9.2.0=py39hf3a2cdf_3
- pip=25.0.1=pyh8b19718_0
- pixman=0.44.2=h29eaf8c_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_2
- platformdirs=3.11.0=pyhd8ed1ab_0
- pluggy=1.5.0=pyhd8ed1ab_1
- ply=3.11=pyhd8ed1ab_3
- poppler=22.04.0=h0733791_3
- poppler-data=0.4.12=hd8ed1ab_0
- postgresql=14.5=h5bbe9e2_3
- proj=9.0.1=h93bde94_1
- prometheus_client=0.21.1=pyhd8ed1ab_0
- prompt-toolkit=3.0.50=pyha770c72_0
- prompt_toolkit=3.0.50=hd8ed1ab_0
- propcache=0.2.1=py39h9399b63_1
- psutil=7.0.0=py39h8cd3c5a_0
- pthread-stubs=0.4=hb9d3cd8_1002
- ptyprocess=0.7.0=pyhd8ed1ab_1
- pulseaudio=16.1=h4ab2085_1
- pure_eval=0.2.3=pyhd8ed1ab_1
- pycodestyle=2.12.1=pyhd8ed1ab_1
- pycparser=2.22=pyh29332c3_1
- pydantic=2.11.1=pyh3cfb1c2_0
- pydantic-core=2.33.0=py39h3506688_0
- pydot=3.0.4=py39hf3d152e_0
- pyflakes=3.2.0=pyhd8ed1ab_1
- pygit2=1.12.2=py39hd1e30aa_1
- pygments=2.19.1=pyhd8ed1ab_0
- pygtrie=2.5.0=pyhd8ed1ab_1
- pylint=3.3.5=pyh29332c3_0
- pyopenssl=23.2.0=pyhd8ed1ab_1
- pyparsing=3.2.3=pyhd8ed1ab_1
- pyproj=3.4.0=py39hdcf6798_0
- pyproject_hooks=1.2.0=pyhd8ed1ab_1
- pyqt=5.15.7=py39h5c7b992_3
- pyqt5-sip=12.11.0=py39h227be39_3
- pysocks=1.7.1=pyha55dd90_7
- pytest=8.3.5=pyhd8ed1ab_0
- pytest-cov=6.0.0=pyhd8ed1ab_1
- pytest-doctestplus=1.4.0=pyhd8ed1ab_0
- pytest-mpl=0.17.0=pyhd8ed1ab_1
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-fastjsonschema=2.21.1=pyhd8ed1ab_0
- python-gssapi=1.8.2=py39ha6ecad8_1
- python-json-logger=2.0.7=pyhd8ed1ab_0
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.9=5_cp39
- pytz=2024.1=pyhd8ed1ab_0
- pywin32-on-windows=0.1.0=pyh1179c8e_3
- pyyaml=6.0.2=py39h9399b63_2
- pyzmq=26.2.0=py39h4e4fb57_1
- qhull=2020.2=h434a139_5
- qt-main=5.15.6=h7acdfc8_2
- readline=8.2=h5eee18b_0
- referencing=0.36.2=pyh29332c3_0
- requests=2.32.3=pyhd8ed1ab_1
- rfc3339-validator=0.1.4=pyhd8ed1ab_1
- rfc3986-validator=0.1.1=pyh9f0ad1d_0
- rich=14.0.0=pyh29332c3_0
- rpds-py=0.24.0=py39h3506688_0
- rtree=1.4.0=pyh11ca60a_1
- ruamel.yaml=0.18.10=py39h8cd3c5a_0
- ruamel.yaml.clib=0.2.8=py39h8cd3c5a_1
- scikit-learn=1.6.1=py39h4b7350c_0
- scipy=1.13.1=py39haf93ffa_0
- scmrepo=1.5.0=pyhd8ed1ab_0
- semver=3.0.4=pyhd8ed1ab_0
- send2trash=1.8.3=pyh0d859eb_1
- setuptools=75.8.0=py39h06a4308_0
- shapely=1.8.2=py39h4fbd0eb_2
- shellingham=1.5.4=pyhd8ed1ab_1
- shortuuid=1.0.13=pyhd8ed1ab_1
- shtab=1.7.1=pyhd8ed1ab_1
- sip=6.7.12=py39h3d6467e_0
- six=1.17.0=pyhd8ed1ab_0
- smmap=5.0.2=pyhd8ed1ab_0
- snappy=1.2.1=h8bd8927_1
- sniffio=1.3.1=pyhd8ed1ab_1
- snowballstemmer=2.2.0=pyhd8ed1ab_0
- soupsieve=2.5=pyhd8ed1ab_1
- sphinx=7.4.7=pyhd8ed1ab_0
- sphinx-copybutton=0.5.2=pyhd8ed1ab_1
- sphinx-gallery=0.19.0=pyhd8ed1ab_0
- sphinx_rtd_theme=3.0.1=pyha770c72_0
- sphinxcontrib-applehelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-devhelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-htmlhelp=2.1.0=pyhd8ed1ab_1
- sphinxcontrib-jquery=4.1=pyhd8ed1ab_1
- sphinxcontrib-jsmath=1.0.1=pyhd8ed1ab_1
- sphinxcontrib-qthelp=2.0.0=pyhd8ed1ab_1
- sphinxcontrib-serializinghtml=1.1.10=pyhd8ed1ab_1
- sqlite=3.45.3=h5eee18b_0
- sqltrie=0.11.2=pyhd8ed1ab_0
- stack_data=0.6.3=pyhd8ed1ab_1
- tabulate=0.9.0=pyhd8ed1ab_2
- terminado=0.18.1=pyh0d859eb_0
- threadpoolctl=3.6.0=pyhecae5ae_0
- tiledb=2.9.5=h1e4a385_0
- tinycss2=1.4.0=pyhd8ed1ab_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd8ed1ab_1
- tomli=2.2.1=pyhd8ed1ab_1
- tomlkit=0.13.2=pyha770c72_1
- tornado=6.4.2=py39h8cd3c5a_0
- tqdm=4.67.1=pyhd8ed1ab_1
- traitlets=5.14.3=pyhd8ed1ab_1
- typer=0.15.2=pyhff008b6_0
- typer-slim=0.15.2=pyh29332c3_0
- typer-slim-standard=0.15.2=h801b22e_0
- types-python-dateutil=2.9.0.20241206=pyhd8ed1ab_0
- typing-extensions=4.13.0=h9fa5a19_1
- typing-inspection=0.4.0=pyhd8ed1ab_0
- typing_extensions=4.13.0=pyh29332c3_1
- typing_utils=0.1.0=pyhd8ed1ab_1
- tzcode=2025b=hb9d3cd8_0
- tzdata=2025a=h04d1e81_0
- unicodedata2=16.0.0=py39h8cd3c5a_0
- untokenize=0.1.1=pyhd8ed1ab_2
- uri-template=1.3.0=pyhd8ed1ab_1
- uriparser=0.9.8=hac33072_0
- urllib3=2.3.0=pyhd8ed1ab_0
- vine=5.1.0=pyhd8ed1ab_1
- voluptuous=0.15.2=pyhd8ed1ab_2
- wcwidth=0.2.13=pyhd8ed1ab_1
- webcolors=24.11.1=pyhd8ed1ab_0
- webencodings=0.5.1=pyhd8ed1ab_3
- websocket-client=1.8.0=pyhd8ed1ab_1
- wheel=0.45.1=py39h06a4308_0
- widgetsnbextension=4.0.13=pyhd8ed1ab_1
- xarray=2024.7.0=pyhd8ed1ab_0
- xcb-util=0.4.0=h516909a_0
- xcb-util-image=0.4.0=h166bdaf_0
- xcb-util-keysyms=0.4.0=h516909a_0
- xcb-util-renderutil=0.3.9=h166bdaf_0
- xcb-util-wm=0.4.1=h516909a_0
- xerces-c=3.2.4=h55805fa_1
- xkeyboard-config=2.38=h0b41bf4_0
- xorg-kbproto=1.0.7=hb9d3cd8_1003
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.4=h0b41bf4_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=hb9d3cd8_1003
- xorg-xextproto=7.3.0=hb9d3cd8_1004
- xorg-xproto=7.0.31=hb9d3cd8_1008
- xyzservices=2025.1.0=pyhd8ed1ab_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7f98852_2
- yarl=1.18.3=py39h9399b63_1
- zc.lockfile=3.0.post1=pyhd8ed1ab_1
- zeromq=4.3.5=h59595ed_1
- zipp=3.21.0=pyhd8ed1ab_1
- zlib=1.2.13=h4ab18f5_6
- zstandard=0.23.0=py39h8cd3c5a_1
- zstd=1.5.6=ha6fb4c9_0
- pip:
- pygmt==0.6.1.dev20+g61781e43
prefix: /opt/conda/envs/pygmt
| [
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_format",
"pygmt/tests/test_grdgradient.py::test_grdgradient_no_outgrid"
] | [
"pygmt/tests/test_grdimage.py::test_grdimage",
"pygmt/tests/test_grdimage.py::test_grdimage_slice",
"pygmt/tests/test_grdimage.py::test_grdimage_file",
"pygmt/tests/test_grdimage.py::test_grdimage_default_no_shading[None]",
"pygmt/tests/test_grdimage.py::test_grdimage_default_no_shading[False]",
"pygmt/tests/test_grdimage.py::test_grdimage_over_dateline",
"pygmt/tests/test_grdimage.py::test_grdimage_global_subset",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[H-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[H-123-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[H-180-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[W-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[W-123-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians[W-180-png]",
"pygmt/tests/test_text.py::test_text_single_line_of_text",
"pygmt/tests/test_text.py::test_text_multiple_lines_of_text",
"pygmt/tests/test_text.py::test_text_input_single_filename",
"pygmt/tests/test_text.py::test_text_input_remote_filename",
"pygmt/tests/test_text.py::test_text_input_multiple_filenames",
"pygmt/tests/test_text.py::test_text_position",
"pygmt/tests/test_text.py::test_text_position_offset_with_line",
"pygmt/tests/test_text.py::test_text_angle_30",
"pygmt/tests/test_text.py::test_text_font_bold",
"pygmt/tests/test_text.py::test_text_fill",
"pygmt/tests/test_text.py::test_text_pen",
"pygmt/tests/test_text.py::test_text_round_clearance",
"pygmt/tests/test_text.py::test_text_justify_bottom_right_and_top_left",
"pygmt/tests/test_text.py::test_text_justify_parsed_from_textfile",
"pygmt/tests/test_text.py::test_text_angle_font_justify_from_textfile",
"pygmt/tests/test_text.py::test_text_transparency",
"pygmt/tests/test_text.py::test_text_varying_transparency",
"pygmt/tests/test_text.py::test_text_no_transparency[None]",
"pygmt/tests/test_text.py::test_text_no_transparency[False]",
"pygmt/tests/test_text.py::test_text_no_transparency[0]",
"pygmt/tests/test_text.py::test_text_nonstr_text"
] | [
"pygmt/tests/test_grd2xyz.py::test_grd2xyz",
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_file_output",
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_invalid_format",
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_no_outfile",
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_outfile_incorrect_output_type",
"pygmt/tests/test_grd2xyz.py::test_grd2xyz_pandas_output_with_o",
"pygmt/tests/test_grdgradient.py::test_grdgradient_outgrid",
"pygmt/tests/test_grdgradient.py::test_grdgradient_fails",
"pygmt/tests/test_grdimage.py::test_grdimage_shading_xarray[True-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_shading_xarray[0.5-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_shading_xarray[+a30+nt0.8-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_shading_xarray[@earth_relief_01d_g+d-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_shading_xarray[@earth_relief_01d_g+a60+nt0.8-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_grid_and_shading_with_xarray[png]",
"pygmt/tests/test_grdimage.py::test_grdimage_fails",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[Q-0-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[Q-0-30-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-0-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-0-30-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-123-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-123-30-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-180-0-png]",
"pygmt/tests/test_grdimage.py::test_grdimage_central_meridians_and_standard_parallels[S-180-30-png]",
"pygmt/tests/test_text.py::test_text_without_text_input",
"pygmt/tests/test_text.py::test_text_nonexistent_filename",
"pygmt/tests/test_text.py::test_text_xy_with_position_fails"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,527 | 3,724 | [
"pygmt/src/grd2cpt.py",
"pygmt/src/grd2xyz.py",
"pygmt/src/grdgradient.py",
"pygmt/src/grdimage.py",
"pygmt/src/grdview.py",
"pygmt/src/makecpt.py",
"pygmt/src/plot.py",
"pygmt/src/plot3d.py",
"pygmt/src/project.py",
"pygmt/src/solar.py",
"pygmt/src/text.py",
"pygmt/src/velo.py"
] |
conan-io__conan-10943 | e8237a0981158ea7a7bce7153acefced7905f677 | 2022-04-01 08:44:50 | fba42152d18038a848c85b749d3a87c8b7749210 | diff --git a/conan/tools/cmake/cmake.py b/conan/tools/cmake/cmake.py
index 3d4def08b..e6d04073f 100644
--- a/conan/tools/cmake/cmake.py
+++ b/conan/tools/cmake/cmake.py
@@ -48,12 +48,11 @@ class CMake(object):
are passed to the command line, plus the ``--config Release`` for builds in multi-config
"""
- def __init__(self, conanfile, namespace=None):
+ def __init__(self, conanfile):
_validate_recipe(conanfile)
# Store a reference to useful data
self._conanfile = conanfile
- self._namespace = namespace
cmake_presets = load_cmake_presets(conanfile.generators_folder)
self._generator = cmake_presets["configurePresets"][0]["generator"]
diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index 8413c86f4..655fba520 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -8,9 +8,9 @@ from jinja2 import Template
from conan.tools._compilers import architecture_flag
from conan.tools.apple.apple import is_apple_os, to_apple_arch
from conan.tools.build import build_jobs
+from conan.tools.build.cross_building import cross_building
from conan.tools.cmake.toolchain import CONAN_TOOLCHAIN_FILENAME
from conan.tools.cmake.utils import is_multi_configuration
-from conan.tools.build.cross_building import cross_building
from conan.tools.intel import IntelCC
from conan.tools.microsoft.visual import is_msvc, msvc_version_to_toolset_version
from conans.errors import ConanException
diff --git a/conan/tools/cmake/toolchain/toolchain.py b/conan/tools/cmake/toolchain/toolchain.py
index 8344dcd99..2fc2935d4 100644
--- a/conan/tools/cmake/toolchain/toolchain.py
+++ b/conan/tools/cmake/toolchain/toolchain.py
@@ -1,8 +1,8 @@
import os
import textwrap
-import six
from collections import OrderedDict
+import six
from jinja2 import Template
from conan.tools._check_build_profile import check_using_build_profile
@@ -95,7 +95,11 @@ class CMakeToolchain(object):
# Variables
{% for it, value in variables.items() %}
+ {% if value is boolean %}
+ set({{ it }} {{ value|cmake_value }} CACHE BOOL "Variable {{ it }} conan-toolchain defined")
+ {% else %}
set({{ it }} {{ value|cmake_value }} CACHE STRING "Variable {{ it }} conan-toolchain defined")
+ {% endif %}
{% endfor %}
# Variables per configuration
{{ iterate_configs(variables_config, action='set') }}
@@ -109,10 +113,9 @@ class CMakeToolchain(object):
{{ iterate_configs(preprocessor_definitions_config, action='add_definitions') }}
""")
- def __init__(self, conanfile, generator=None, namespace=None):
+ def __init__(self, conanfile, generator=None):
self._conanfile = conanfile
self.generator = self._get_generator(generator)
- self._namespace = namespace
self.variables = Variables()
self.preprocessor_definitions = Variables()
@@ -161,7 +164,7 @@ class CMakeToolchain(object):
def generate(self):
toolchain_file = self._conanfile.conf.get("tools.cmake.cmaketoolchain:toolchain_file")
if toolchain_file is None: # The main toolchain file generated only if user dont define
- save(self.filename, self.content)
+ save(os.path.join(self._conanfile.generators_folder, self.filename), self.content)
# If we're using Intel oneAPI, we need to generate the environment file and run it
if self._conanfile.settings.get_safe("compiler") == "intel-cc":
IntelCC(self._conanfile).generate()
diff --git a/conans/model/info.py b/conans/model/info.py
index d1a98c6a3..80bd185a2 100644
--- a/conans/model/info.py
+++ b/conans/model/info.py
@@ -349,6 +349,9 @@ class PythonRequireInfo(object):
self._channel = self._ref.channel
self._revision = self._ref.revision
+ def unrelated_mode(self):
+ self._name = self._version = self._user = self._channel = self._revision = None
+
class PythonRequiresInfo(object):
| [Fix] Remove "namespace" argument from CMakeToolchain
Since 1.47, which generates the CMakePresets.json file, the argument is not used anymore.
Also from the CMake() build helper | conan-io/conan | diff --git a/conans/test/integration/package_id/python_requires_package_id_test.py b/conans/test/integration/package_id/python_requires_package_id_test.py
index bb9a67fd7..716e7b5c4 100644
--- a/conans/test/integration/package_id/python_requires_package_id_test.py
+++ b/conans/test/integration/package_id/python_requires_package_id_test.py
@@ -50,6 +50,19 @@ class PythonRequiresPackageIDTest(unittest.TestCase):
self.assertIn("tool/1.1.2", self.client2.out)
self.assertIn("pkg/0.1:387c1c797a011d426ecb25a1e01b28251e443ec8 - Build", self.client2.out)
+ def test_unrelated_conf(self):
+ # change the policy in conan.conf
+ self.client2.run("config set general.default_python_requires_id_mode=unrelated_mode")
+ self.client2.run("create . pkg/0.1@")
+ self.assertIn("tool/1.1.1", self.client2.out)
+ self.assertIn("pkg/0.1:c941ae50e2daf4a118c393591cfef6a55cd1cfad - Build", self.client2.out)
+
+ # with any change the package id doesn't change
+ self.client.run("export . tool/1.1.2@")
+ self.client2.run("create . pkg/0.1@ --build missing")
+ self.assertIn("tool/1.1.2", self.client2.out)
+ self.assertIn("pkg/0.1:c941ae50e2daf4a118c393591cfef6a55cd1cfad - Cache", self.client2.out)
+
def test_change_mode_package_id(self):
# change the policy in package_id
conanfile = textwrap.dedent("""
diff --git a/conans/test/integration/toolchains/test_toolchain_namespaces.py b/conans/test/integration/toolchains/test_toolchain_namespaces.py
index 57a40afb8..b98f8b1aa 100644
--- a/conans/test/integration/toolchains/test_toolchain_namespaces.py
+++ b/conans/test/integration/toolchains/test_toolchain_namespaces.py
@@ -2,39 +2,10 @@ import os
import textwrap
from conan.tools import CONAN_TOOLCHAIN_ARGS_FILE
-from conan.tools.cmake.presets import load_cmake_presets
from conan.tools.files.files import load_toolchain_args
from conans.test.utils.tools import TestClient
-def test_cmake_namespace():
- client = TestClient()
- namespace = "somename"
- conanfile = textwrap.dedent("""
- from conans import ConanFile
- from conan.tools.cmake import CMakeToolchain, CMake
-
- class Conan(ConanFile):
- settings = "os", "arch", "compiler", "build_type"
- def generate(self):
- cmake = CMakeToolchain(self, namespace='{0}')
- cmake.generate()
- def build(self):
- cmake = CMake(self, namespace='{0}')
- self.output.info(cmake._generator)
- self.output.info(cmake._toolchain_file)
- """.format(namespace))
-
- client.save({"conanfile.py": conanfile})
- client.run("install . ")
- presets = load_cmake_presets(client.current_folder)
- toolchain_file = presets["configurePresets"][0]["toolchainFile"]
- generator = presets["configurePresets"][0]["generator"]
- client.run("build . ")
- assert generator in client.out
- assert toolchain_file in client.out
-
-
def test_bazel_namespace():
client = TestClient()
namespace = "somename"
diff --git a/conans/test/unittests/tools/cmake/test_cmaketoolchain.py b/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
index ddc5baf75..da9d29b66 100644
--- a/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
+++ b/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
@@ -1,3 +1,4 @@
+import os
import types
import pytest
@@ -10,6 +11,8 @@ from conans.client.conf import get_default_settings_yml
from conans.errors import ConanException
from conans.model.conf import Conf
from conans.model.env_info import EnvValues
+from conans.test.utils.test_files import temp_folder
+from conans.util.files import load
@pytest.fixture
@@ -462,3 +465,17 @@ def test_apple_cmake_osx_sysroot_sdk_mandatory(os, os_sdk, arch, expected_sdk):
with pytest.raises(ConanException) as excinfo:
CMakeToolchain(c).content()
assert "Please, specify a suitable value for os.sdk." % expected_sdk in str(excinfo.value)
+
+
+def test_variables_types(conanfile):
+ generator_folder = temp_folder()
+ conanfile.folders.set_base_generators(generator_folder)
+ # This is a trick for 1.X to use base_generator and not install folder
+ conanfile.folders.generators = "here"
+
+ toolchain = CMakeToolchain(conanfile)
+ toolchain.variables["FOO"] = True
+ toolchain.generate()
+
+ contents = load(os.path.join(conanfile.generators_folder, "conan_toolchain.cmake"))
+ assert 'set(FOO ON CACHE BOOL "Variable FOO conan-toolchain defined")' in contents
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | 1.47 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.8",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
beautifulsoup4==4.13.3
bottle==0.12.25
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
-e git+https://github.com/conan-io/conan.git@e8237a0981158ea7a7bce7153acefced7905f677#egg=conan
distro==1.6.0
execnet==2.1.1
fasteners==0.19
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==2.1.5
mock==1.3.0
node-semver==0.6.1
packaging==24.2
parameterized==0.9.0
patch-ng==1.17.4
pbr==6.1.1
pluggy==1.5.0
pluginbase==1.0.1
py==1.11.0
Pygments==2.19.1
PyJWT==1.7.1
pytest==6.2.5
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
PyYAML==5.4.1
requests==2.32.3
six==1.16.0
soupsieve==2.6
toml==0.10.2
tqdm==4.67.1
typing_extensions==4.13.0
urllib3==1.26.20
waitress==3.0.0
WebOb==1.8.9
WebTest==2.0.35
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- beautifulsoup4==4.13.3
- bottle==0.12.25
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- distro==1.6.0
- execnet==2.1.1
- fasteners==0.19
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==2.1.5
- mock==1.3.0
- node-semver==0.6.1
- packaging==24.2
- parameterized==0.9.0
- patch-ng==1.17.4
- pbr==6.1.1
- pluggy==1.5.0
- pluginbase==1.0.1
- py==1.11.0
- pygments==2.19.1
- pyjwt==1.7.1
- pytest==6.2.5
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- pyyaml==5.4.1
- requests==2.32.3
- six==1.16.0
- soupsieve==2.6
- toml==0.10.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- waitress==3.0.0
- webob==1.8.9
- webtest==2.0.35
prefix: /opt/conda/envs/conan
| [
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_unrelated_conf",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_variables_types"
] | [] | [
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_change_mode_conf",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_change_mode_package_id",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_default",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresForBuildRequiresPackageIDTest::test",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_bazel_namespace",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_autotools_namespace",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_multiple_toolchains_one_recipe",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_cmake_toolchain",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_remove",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_template_remove",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_template_change",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_change",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_update",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_replace",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_replace_block",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_add_new_block",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_user_toolchain",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_osx_deployment_target",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_toolset",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_older_msvc_toolset",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_msvc_xp_toolsets",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_no_fpic_when_not_an_option",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_shared_true[True]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_shared_true[False]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_not_shared",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_no_fpic_on_windows",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_disabled",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_enabled",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_libcxx_abi_flag",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[Macos-None-x86_64-macosx]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[Macos-None-armv7-macosx]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[iOS-iphonesimulator-armv8-iphonesimulator]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[watchOS-watchsimulator-armv8-watchsimulator]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[iOS-None-x86_64-]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[watchOS-None-armv8-]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[tvOS-None-x86_64-]"
] | [] | MIT License | 12,541 | 1,107 | [
"conan/tools/cmake/cmake.py",
"conan/tools/cmake/toolchain/blocks.py",
"conan/tools/cmake/toolchain/toolchain.py",
"conans/model/info.py"
] |
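For the CMakeToolchain/CMake change above, a minimal recipe sketch of how the helpers are called once the `namespace` argument is gone, and of the new boolean handling the added test asserts (`True` is written as a `CACHE BOOL` set to `ON`). The package name, option names and values are invented for illustration; the imports and helper calls are the ones shown in the diff.

```python
from conans import ConanFile
from conan.tools.cmake import CMakeToolchain, CMake


class HelloConan(ConanFile):
    name = "hello"  # invented example package
    version = "1.0"
    settings = "os", "arch", "compiler", "build_type"

    def generate(self):
        tc = CMakeToolchain(self)        # no 'namespace' keyword anymore
        tc.variables["WITH_FOO"] = True  # rendered as a CACHE BOOL (ON) by the new template branch
        tc.variables["BAR"] = "baz"      # non-boolean values keep the CACHE STRING form
        tc.generate()

    def build(self):
        cmake = CMake(self)              # no 'namespace' keyword anymore
        cmake.configure()
        cmake.build()
```

The `{% if value is boolean %}` branch added to the toolchain template is what distinguishes the two cases.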
|
asottile__setup-cfg-fmt-132 | 5efd7109ad526cf4b5b5de7dff003eb5aa1e4058 | 2022-04-02 16:02:50 | 97c79d263a803b0dd7a5cea649b3ac1ae4b9e2de | diff --git a/setup_cfg_fmt.py b/setup_cfg_fmt.py
index 34e3824..afd3a45 100644
--- a/setup_cfg_fmt.py
+++ b/setup_cfg_fmt.py
@@ -75,6 +75,12 @@ TOX_TO_CLASSIFIERS = {
}
+class NoTransformConfigParser(configparser.RawConfigParser):
+ def optionxform(self, s: str) -> str:
+ """disable default lower-casing"""
+ return s
+
+
def _adjacent_filename(setup_cfg: str, filename: str) -> str:
return os.path.join(os.path.dirname(setup_cfg), filename)
@@ -153,7 +159,7 @@ def _parse_python_requires(
def _tox_envlist(setup_cfg: str) -> Generator[str, None, None]:
tox_ini = _adjacent_filename(setup_cfg, 'tox.ini')
if os.path.exists(tox_ini):
- cfg = configparser.ConfigParser()
+ cfg = NoTransformConfigParser()
cfg.read(tox_ini)
envlist = cfg.get('tox', 'envlist', fallback='')
@@ -166,7 +172,7 @@ def _tox_envlist(setup_cfg: str) -> Generator[str, None, None]:
def _python_requires(
setup_cfg: str, *, min_py3_version: tuple[int, int],
) -> str | None:
- cfg = configparser.ConfigParser()
+ cfg = NoTransformConfigParser()
cfg.read(setup_cfg)
current_value = cfg.get('options', 'python_requires', fallback='')
classifiers = cfg.get('metadata', 'classifiers', fallback='')
@@ -207,7 +213,7 @@ def _python_requires(
def _requires(
- cfg: configparser.ConfigParser, which: str, section: str = 'options',
+ cfg: NoTransformConfigParser, which: str, section: str = 'options',
) -> list[str]:
raw = cfg.get(section, which, fallback='')
@@ -356,7 +362,7 @@ def format_file(
with open(filename) as f:
contents = f.read()
- cfg = configparser.ConfigParser()
+ cfg = NoTransformConfigParser()
cfg.read_string(contents)
_clean_sections(cfg)
@@ -467,7 +473,7 @@ def format_file(
return new_contents != contents
-def _clean_sections(cfg: configparser.ConfigParser) -> None:
+def _clean_sections(cfg: NoTransformConfigParser) -> None:
"""Removes any empty options and sections."""
for section in cfg.sections():
new_options = {k: v for k, v in cfg[section].items() if v}
| configparser is downcasing keys in unrelated sections
For instance, this doesn't round-trip:
```ini
[tool:pytest]
DJANGO_SETTINGS_MODULE = test.test
``` | asottile/setup-cfg-fmt | diff --git a/tests/setup_cfg_fmt_test.py b/tests/setup_cfg_fmt_test.py
index 0bc8310..b3372a7 100644
--- a/tests/setup_cfg_fmt_test.py
+++ b/tests/setup_cfg_fmt_test.py
@@ -951,6 +951,22 @@ def test_imp_classifiers_pypy_only(tmpdir):
)
+def test_leaves_casing_of_unrelated_settings(tmpdir):
+ setup_cfg = tmpdir.join('setup.cfg')
+ setup_cfg.write(
+ '[metadata]\n'
+ 'name = pkg\n'
+ 'version = 1.0\n'
+ 'classifiers =\n'
+ ' Programming Language :: Python :: Implementation :: CPython\n'
+ '\n'
+ '[tool:pytest]\n'
+ 'DJANGO_SETTINGS_MODULE = test.test\n',
+ )
+
+ assert not main((str(setup_cfg),))
+
+
def test_natural_sort():
classifiers = [
'Programming Language :: Python :: 3',
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.20 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cffi==1.17.1
covdefaults==2.3.0
coverage==7.8.0
exceptiongroup==1.2.2
identify==2.6.9
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
-e git+https://github.com/asottile/setup-cfg-fmt.git@5efd7109ad526cf4b5b5de7dff003eb5aa1e4058#egg=setup_cfg_fmt
tomli==2.2.1
ukkonen==1.0.1
| name: setup-cfg-fmt
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cffi==1.17.1
- covdefaults==2.3.0
- coverage==7.8.0
- exceptiongroup==1.2.2
- identify==2.6.9
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- tomli==2.2.1
- ukkonen==1.0.1
prefix: /opt/conda/envs/setup-cfg-fmt
| [
"tests/setup_cfg_fmt_test.py::test_leaves_casing_of_unrelated_settings"
] | [] | [
"tests/setup_cfg_fmt_test.py::test_ver_type_ok",
"tests/setup_cfg_fmt_test.py::test_ver_type_error",
"tests/setup_cfg_fmt_test.py::test_ver_type_not_a_version",
"tests/setup_cfg_fmt_test.py::test_case_insensitive_glob[foo-[Ff][Oo][Oo]]",
"tests/setup_cfg_fmt_test.py::test_case_insensitive_glob[FOO-[Ff][Oo][Oo]]",
"tests/setup_cfg_fmt_test.py::test_case_insensitive_glob[licen[sc]e-[Ll][Ii][Cc][Ee][Nn][SsCc][Ee]]",
"tests/setup_cfg_fmt_test.py::test_noop",
"tests/setup_cfg_fmt_test.py::test_rewrite_requires[install_requires-normalizes",
"tests/setup_cfg_fmt_test.py::test_rewrite_requires[setup_requires-normalizes",
"tests/setup_cfg_fmt_test.py::test_rewrite[orders",
"tests/setup_cfg_fmt_test.py::test_rewrite[normalizes",
"tests/setup_cfg_fmt_test.py::test_rewrite[sorts",
"tests/setup_cfg_fmt_test.py::test_rewrite[normalize",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[no",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[whitespace",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[<=",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[>=",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[b/w",
"tests/setup_cfg_fmt_test.py::test_normalize_lib[compatible",
"tests/setup_cfg_fmt_test.py::test_adds_long_description_with_readme[README.rst-text/x-rst]",
"tests/setup_cfg_fmt_test.py::test_adds_long_description_with_readme[README.markdown-text/markdown]",
"tests/setup_cfg_fmt_test.py::test_adds_long_description_with_readme[README.md-text/markdown]",
"tests/setup_cfg_fmt_test.py::test_adds_long_description_with_readme[README-text/plain]",
"tests/setup_cfg_fmt_test.py::test_adds_long_description_with_readme[readme.txt-text/plain]",
"tests/setup_cfg_fmt_test.py::test_readme_discover_prefers_file_over_directory",
"tests/setup_cfg_fmt_test.py::test_sets_license_file_if_license_exists[LICENSE]",
"tests/setup_cfg_fmt_test.py::test_sets_license_file_if_license_exists[LICENCE]",
"tests/setup_cfg_fmt_test.py::test_sets_license_file_if_license_exists[LICENSE.md]",
"tests/setup_cfg_fmt_test.py::test_sets_license_file_if_license_exists[license.txt]",
"tests/setup_cfg_fmt_test.py::test_license_does_not_match_directories",
"tests/setup_cfg_fmt_test.py::test_rewrite_sets_license_type_and_classifier",
"tests/setup_cfg_fmt_test.py::test_rewrite_identifies_license",
"tests/setup_cfg_fmt_test.py::test_python_requires_left_alone[already",
"tests/setup_cfg_fmt_test.py::test_python_requires_left_alone[weird",
"tests/setup_cfg_fmt_test.py::test_python_requires_left_alone[not",
"tests/setup_cfg_fmt_test.py::test_strips_empty_options_and_sections[only",
"tests/setup_cfg_fmt_test.py::test_strips_empty_options_and_sections[entire",
"tests/setup_cfg_fmt_test.py::test_guess_python_requires_python2_tox_ini",
"tests/setup_cfg_fmt_test.py::test_guess_python_requires_tox_ini_dashed_name",
"tests/setup_cfg_fmt_test.py::test_guess_python_requires_ignores_insufficient_version_envs",
"tests/setup_cfg_fmt_test.py::test_guess_python_requires_from_classifiers",
"tests/setup_cfg_fmt_test.py::test_min_py3_version_updates_python_requires",
"tests/setup_cfg_fmt_test.py::test_min_py3_version_greater_than_minimum",
"tests/setup_cfg_fmt_test.py::test_min_version_removes_classifiers",
"tests/setup_cfg_fmt_test.py::test_python_requires_with_patch_version",
"tests/setup_cfg_fmt_test.py::test_classifiers_left_alone_for_odd_python_requires",
"tests/setup_cfg_fmt_test.py::test_min_py3_version_less_than_minimum",
"tests/setup_cfg_fmt_test.py::test_rewrite_extras",
"tests/setup_cfg_fmt_test.py::test_imp_classifiers_from_tox_ini",
"tests/setup_cfg_fmt_test.py::test_imp_classifiers_no_change",
"tests/setup_cfg_fmt_test.py::test_imp_classifiers_pypy_only",
"tests/setup_cfg_fmt_test.py::test_natural_sort"
] | [] | MIT License | 12,547 | 607 | [
"setup_cfg_fmt.py"
] |
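For the setup-cfg-fmt record above, a small standard-library sketch of the behaviour the issue describes and of the `optionxform` override the patch introduces: the stock parser lower-cases option names on read, while a parser whose `optionxform` returns the key unchanged preserves `DJANGO_SETTINGS_MODULE` as written.

```python
import configparser

INI = "[tool:pytest]\nDJANGO_SETTINGS_MODULE = test.test\n"

# Stock parser: option names go through optionxform(), which lower-cases them.
stock = configparser.ConfigParser()
stock.read_string(INI)
print(list(stock["tool:pytest"]))       # ['django_settings_module']


# Same override as in the patch: keep option names exactly as written.
class NoTransformConfigParser(configparser.RawConfigParser):
    def optionxform(self, s: str) -> str:
        return s


preserving = NoTransformConfigParser()
preserving.read_string(INI)
print(list(preserving["tool:pytest"]))  # ['DJANGO_SETTINGS_MODULE']
```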
|
borgbackup__borg-6527 | abd3b476de9ea6301fc2d892d377b215b087dd84 | 2022-04-03 14:50:20 | ae417cccf6a2653c010a1f379f3fdef8e55d7ccd | codecov-commenter: # [Codecov](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) Report
> Merging [#6527](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) (58b22e8) into [1.1-maint](https://codecov.io/gh/borgbackup/borg/commit/ae24d527bac40e12d1dfa46d528252db30395f66?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) (ae24d52) will **decrease** coverage by `0.76%`.
> The diff coverage is `100.00%`.
```diff
@@ Coverage Diff @@
## 1.1-maint #6527 +/- ##
=============================================
- Coverage 79.78% 79.01% -0.77%
=============================================
Files 27 27
Lines 10511 10499 -12
Branches 2151 2029 -122
=============================================
- Hits 8386 8296 -90
- Misses 1601 1667 +66
- Partials 524 536 +12
```
| [Impacted Files](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) | Coverage Δ | |
|---|---|---|
| [src/borg/helpers.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvaGVscGVycy5weQ==) | `86.99% <100.00%> (-0.38%)` | :arrow_down: |
| [src/borg/platform/base.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvcGxhdGZvcm0vYmFzZS5weQ==) | `79.16% <100.00%> (-2.89%)` | :arrow_down: |
| [src/borg/xattr.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcveGF0dHIucHk=) | `60.97% <0.00%> (-20.19%)` | :arrow_down: |
| [src/borg/platform/\_\_init\_\_.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvcGxhdGZvcm0vX19pbml0X18ucHk=) | `73.68% <0.00%> (-16.32%)` | :arrow_down: |
| [src/borg/nanorst.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvbmFub3JzdC5weQ==) | `86.07% <0.00%> (-2.68%)` | :arrow_down: |
| [src/borg/archive.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvYXJjaGl2ZS5weQ==) | `80.20% <0.00%> (-1.48%)` | :arrow_down: |
| [src/borg/upgrader.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvdXBncmFkZXIucHk=) | `62.06% <0.00%> (-1.15%)` | :arrow_down: |
| [src/borg/repository.py](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup#diff-c3JjL2JvcmcvcmVwb3NpdG9yeS5weQ==) | `85.31% <0.00%> (-0.24%)` | :arrow_down: |
| ... and [7 more](https://codecov.io/gh/borgbackup/borg/pull/6527/diff?src=pr&el=tree-more&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup) | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup). Last update [ffbe366...58b22e8](https://codecov.io/gh/borgbackup/borg/pull/6527?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=borgbackup).
| diff --git a/src/borg/archiver.py b/src/borg/archiver.py
index 83269ff0..e5f8f515 100644
--- a/src/borg/archiver.py
+++ b/src/borg/archiver.py
@@ -1385,37 +1385,19 @@ def _delete_repository(self, args, repository):
if not args.cache_only:
if args.forced == 0: # without --force, we let the user see the archives list and confirm.
- id = bin_to_hex(repository.id)
- location = repository._location.canonical_path()
msg = []
try:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
- n_archives = len(manifest.archives)
- msg.append("You requested to completely DELETE the following repository "
- "*including* {} archives it contains:".format(n_archives))
except NoManifestError:
- n_archives = None
- msg.append("You requested to completely DELETE the following repository "
- "*including* all archives it may contain:")
-
- msg.append(DASHES)
- msg.append("Repository ID: {}".format(id))
- msg.append("Location: {}".format(location))
-
- msg.append("")
- msg.append("Archives:")
-
- if n_archives is not None:
- if n_archives > 0:
- for archive_info in manifest.archives.list(sort_by=['ts']):
- msg.append(format_archive(archive_info))
- else:
- msg.append("This repository seems to not have any archives.")
+ msg.append("You requested to completely DELETE the repository *including* all archives it may "
+ "contain.")
+ msg.append("This repository seems to have no manifest, so we can't tell anything about its "
+ "contents.")
else:
- msg.append("This repository seems to have no manifest, so we can't "
- "tell anything about its contents.")
-
- msg.append(DASHES)
+ msg.append("You requested to completely DELETE the repository *including* all archives it "
+ "contains:")
+ for archive_info in manifest.archives.list(sort_by=['ts']):
+ msg.append(format_archive(archive_info))
msg.append("Type 'YES' if you understand this and want to continue: ")
msg = '\n'.join(msg)
if not yes(msg, false_msg="Aborting.", invalid_msg='Invalid answer, aborting.', truish=('YES',),
diff --git a/src/borg/crypto/key.py b/src/borg/crypto/key.py
index 81e49d72..1b16810b 100644
--- a/src/borg/crypto/key.py
+++ b/src/borg/crypto/key.py
@@ -641,7 +641,7 @@ def decrypt_key_file(self, data, passphrase):
assert enc_key.algorithm == 'sha256'
key = passphrase.kdf(enc_key.salt, enc_key.iterations, 32)
data = AES(is_encrypt=False, key=key).decrypt(enc_key.data)
- if compare_digest(hmac_sha256(key, data), enc_key.hash):
+ if hmac_sha256(key, data) == enc_key.hash:
return data
def encrypt_key_file(self, data, passphrase):
diff --git a/src/borg/helpers.py b/src/borg/helpers.py
index 322d4168..acf828c1 100644
--- a/src/borg/helpers.py
+++ b/src/borg/helpers.py
@@ -1100,24 +1100,33 @@ class Location:
(?P<archive>[^/]+) # archive name must not contain "/"
)?$""" # must match until the end
+ # host NAME, or host IP ADDRESS (v4 or v6, v6 must be in square brackets)
+ host_re = r"""
+ (?P<host>(
+ (?!\[)[^:/]+(?<!\]) # hostname or v4 addr, not containing : or / (does not match v6 addr: no brackets!)
+ |
+ \[[0-9a-fA-F:.]+\]) # ipv6 address in brackets
+ )
+ """
+
# regexes for misc. kinds of supported location specifiers:
ssh_re = re.compile(r"""
- (?P<proto>ssh):// # ssh://
- """ + optional_user_re + r""" # user@ (optional)
- (?P<host>([^:/]+|\[[0-9a-fA-F:.]+\]))(?::(?P<port>\d+))? # host or host:port or [ipv6] or [ipv6]:port
- """ + abs_path_re + optional_archive_re, re.VERBOSE) # path or path::archive
+ (?P<proto>ssh):// # ssh://
+ """ + optional_user_re + host_re + r""" # user@ (optional), host name or address
+ (?::(?P<port>\d+))? # :port (optional)
+ """ + abs_path_re + optional_archive_re, re.VERBOSE) # path or path::archive
file_re = re.compile(r"""
(?P<proto>file):// # file://
""" + file_path_re + optional_archive_re, re.VERBOSE) # servername/path, path or path::archive
- # note: scp_re is also use for local paths
+ # note: scp_re is also used for local paths
scp_re = re.compile(r"""
(
- """ + optional_user_re + r""" # user@ (optional)
- (?P<host>([^:/]+|\[[0-9a-fA-F:.]+\])): # host: (don't match / or [ipv6] in host to disambiguate from file:)
- )? # user@host: part is optional
- """ + scp_path_re + optional_archive_re, re.VERBOSE) # path with optional archive
+ """ + optional_user_re + host_re + r""" # user@ (optional), host name or address
+ : # : (required!)
+ )? # user@host: part is optional
+ """ + scp_path_re + optional_archive_re, re.VERBOSE) # path with optional archive
# get the repo from BORG_REPO env and the optional archive from param.
# if the syntax requires giving REPOSITORY (see "borg mount"),
| scp-style repo URL parsing broken for ip v6 addrs
Reported by @jeanrjc:
Hello, sorry to comment on this issue, but maybe I did not understand something.
The following does not work with borg 1.1.9:
```bash
$ borg check --archives-only backupNC@[2a02:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx]:/media/pi/myCloudDrive/borgBKP --debug
using builtin fallback logging configuration
35 self tests completed in 1.18 seconds
SSH command line: ['ssh', 'backupNC@2a02', 'borg', 'serve', '--umask=077', '--debug']
Remote: ssh: Could not resolve hostname 2a02: Name or service not known
Connection closed by remote host. Is borg working on the server?
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/borg/archiver.py", line 4455, in main
exit_code = archiver.run(args)
File "/usr/lib/python3/dist-packages/borg/archiver.py", line 4387, in run
return set_ec(func(args))
File "/usr/lib/python3/dist-packages/borg/archiver.py", line 134, in wrapper
make_parent_dirs=make_parent_dirs, args=args)
File "/usr/lib/python3/dist-packages/borg/remote.py", line 577, in __init__
raise ConnectionClosedWithHint('Is borg working on the server?') from None
borg.remote.ConnectionClosedWithHint: Connection closed by remote host. Is borg working on the server?
Platform: Linux nextcloudpi 5.10.103-v7l+ #1529 SMP Tue Mar 8 12:24:00 GMT 2022 armv7l
Linux: debian 10.12
Borg: 1.1.9 Python: CPython 3.7.3
PID: 4155 CWD: /home/pi
sys.argv: ['/usr/bin/borg', 'check', '--archives-only', 'backupNC@[2a02:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx]:/media/pi/myCloudDrive/borgBKP', '--debug']
SSH_ORIGINAL_COMMAND: None
```
The "SSH command line" is obviously wrong, what did I miss ?
Thanks
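
A quick way to check the behaviour described above once the patch is applied (a sketch; the address is the documentation example already used in the tests, and the path is the one from the report): parse an scp-style URL with a bracketed IPv6 host through `Location` and inspect the result. The old pattern could not carry the host past the first colon inside the brackets, which is consistent with `backupNC@2a02` showing up on the SSH command line.

```python
from borg.helpers import Location

loc = Location('backupNC@[2001:db8::192.0.2.1]:/media/pi/myCloudDrive/borgBKP')
print(repr(loc))
# Expected after the fix (mirroring the tests added below):
# Location(proto='ssh', user='backupNC', host='2001:db8::192.0.2.1',
#          port=None, path='/media/pi/myCloudDrive/borgBKP', archive=None)
```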
| borgbackup/borg | diff --git a/src/borg/testsuite/helpers.py b/src/borg/testsuite/helpers.py
index fa4af1de..e1dca810 100644
--- a/src/borg/testsuite/helpers.py
+++ b/src/borg/testsuite/helpers.py
@@ -95,6 +95,10 @@ def test_ssh(self, monkeypatch, keys_dir):
assert repr(Location('ssh://user@[2001:db8::192.0.2.1]/some/path')) == \
"Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=None, path='/some/path', archive=None)"
assert Location('ssh://user@[2001:db8::192.0.2.1]/some/path').to_key_filename() == keys_dir + '2001_db8__192_0_2_1__some_path'
+ assert repr(Location('ssh://user@[2a02:0001:0002:0003:0004:0005:0006:0007]/some/path')) == \
+ "Location(proto='ssh', user='user', host='2a02:0001:0002:0003:0004:0005:0006:0007', port=None, path='/some/path', archive=None)"
+ assert repr(Location('ssh://user@[2a02:0001:0002:0003:0004:0005:0006:0007]:1234/some/path')) == \
+ "Location(proto='ssh', user='user', host='2a02:0001:0002:0003:0004:0005:0006:0007', port=1234, path='/some/path', archive=None)"
def test_file(self, monkeypatch, keys_dir):
monkeypatch.delenv('BORG_REPO', raising=False)
@@ -127,6 +131,8 @@ def test_scp(self, monkeypatch, keys_dir):
assert repr(Location('user@[2001:db8::192.0.2.1]:/some/path')) == \
"Location(proto='ssh', user='user', host='2001:db8::192.0.2.1', port=None, path='/some/path', archive=None)"
assert Location('user@[2001:db8::192.0.2.1]:/some/path').to_key_filename() == keys_dir + '2001_db8__192_0_2_1__some_path'
+ assert repr(Location('user@[2a02:0001:0002:0003:0004:0005:0006:0007]:/some/path')) == \
+ "Location(proto='ssh', user='user', host='2a02:0001:0002:0003:0004:0005:0006:0007', port=None, path='/some/path', archive=None)"
def test_smb(self, monkeypatch, keys_dir):
monkeypatch.delenv('BORG_REPO', raising=False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libacl1-dev libssl-dev liblz4-dev libzstd-dev libxxhash-dev libdeflate-dev build-essential pkg-config python3-pkgconfig"
],
"python": "3.9",
"reqs_path": [
"requirements.d/development.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | backports.tarfile==1.2.0
-e git+https://github.com/borgbackup/borg.git@abd3b476de9ea6301fc2d892d377b215b087dd84#egg=borgbackup
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
Cython==3.0.12
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
markdown-it-py==3.0.0
mdurl==0.1.2
more-itertools==10.6.0
nh3==0.2.21
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
py-cpuinfo==9.0.0
pycparser==2.22
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-xdist==3.6.1
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
setuptools-scm==8.2.0
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: borg
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- backports-tarfile==1.2.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- cython==3.0.12
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- markdown-it-py==3.0.0
- mdurl==0.1.2
- more-itertools==10.6.0
- nh3==0.2.21
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pycparser==2.22
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- setuptools-scm==8.2.0
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/borg
| [
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_scp"
] | [] | [
"src/borg/testsuite/helpers.py::BigIntTestCase::test_bigint",
"src/borg/testsuite/helpers.py::test_bin_to_hex",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_ssh",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_file",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_smb",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_folder",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_long_path",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_abspath",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_relpath",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_with_colons",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_user_parsing",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_with_timestamp",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_underspecified",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_no_slashes",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_canonical_path",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_format_path",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_bad_syntax",
"src/borg/testsuite/helpers.py::TestLocationWithoutEnv::test_omit_archive",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_ssh",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_ssh_placeholder",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_file",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_scp",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_folder",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_abspath",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_relpath",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_with_colons",
"src/borg/testsuite/helpers.py::TestLocationWithEnv::test_no_slashes",
"src/borg/testsuite/helpers.py::FormatTimedeltaTestCase::test",
"src/borg/testsuite/helpers.py::test_chunkerparams",
"src/borg/testsuite/helpers.py::MakePathSafeTestCase::test",
"src/borg/testsuite/helpers.py::PruneSplitTestCase::test",
"src/borg/testsuite/helpers.py::IntervalTestCase::test_interval",
"src/borg/testsuite/helpers.py::IntervalTestCase::test_interval_number",
"src/borg/testsuite/helpers.py::IntervalTestCase::test_interval_time_unit",
"src/borg/testsuite/helpers.py::PruneWithinTestCase::test_prune_within",
"src/borg/testsuite/helpers.py::StableDictTestCase::test",
"src/borg/testsuite/helpers.py::TestParseTimestamp::test",
"src/borg/testsuite/helpers.py::test_get_base_dir",
"src/borg/testsuite/helpers.py::test_get_config_dir",
"src/borg/testsuite/helpers.py::test_get_cache_dir",
"src/borg/testsuite/helpers.py::test_get_keys_dir",
"src/borg/testsuite/helpers.py::test_get_security_dir",
"src/borg/testsuite/helpers.py::test_file_size",
"src/borg/testsuite/helpers.py::test_file_size_precision",
"src/borg/testsuite/helpers.py::test_file_size_sign",
"src/borg/testsuite/helpers.py::test_parse_file_size[1-1]",
"src/borg/testsuite/helpers.py::test_parse_file_size[20-20]",
"src/borg/testsuite/helpers.py::test_parse_file_size[5K-5000]",
"src/borg/testsuite/helpers.py::test_parse_file_size[1.75M-1750000]",
"src/borg/testsuite/helpers.py::test_parse_file_size[1e+9-1000000000.0]",
"src/borg/testsuite/helpers.py::test_parse_file_size[-1T--1000000000000.0]",
"src/borg/testsuite/helpers.py::test_parse_file_size_invalid[]",
"src/borg/testsuite/helpers.py::test_parse_file_size_invalid[5",
"src/borg/testsuite/helpers.py::test_parse_file_size_invalid[4E]",
"src/borg/testsuite/helpers.py::test_parse_file_size_invalid[2229",
"src/borg/testsuite/helpers.py::test_parse_file_size_invalid[1B]",
"src/borg/testsuite/helpers.py::test_is_slow_msgpack",
"src/borg/testsuite/helpers.py::TestBuffer::test_type",
"src/borg/testsuite/helpers.py::TestBuffer::test_len",
"src/borg/testsuite/helpers.py::TestBuffer::test_resize",
"src/borg/testsuite/helpers.py::TestBuffer::test_limit",
"src/borg/testsuite/helpers.py::TestBuffer::test_get",
"src/borg/testsuite/helpers.py::test_yes_input",
"src/borg/testsuite/helpers.py::test_yes_input_defaults",
"src/borg/testsuite/helpers.py::test_yes_input_custom",
"src/borg/testsuite/helpers.py::test_yes_env",
"src/borg/testsuite/helpers.py::test_yes_env_default",
"src/borg/testsuite/helpers.py::test_yes_defaults",
"src/borg/testsuite/helpers.py::test_yes_retry",
"src/borg/testsuite/helpers.py::test_yes_no_retry",
"src/borg/testsuite/helpers.py::test_yes_output",
"src/borg/testsuite/helpers.py::test_yes_env_output",
"src/borg/testsuite/helpers.py::test_progress_percentage_sameline",
"src/borg/testsuite/helpers.py::test_progress_percentage_step",
"src/borg/testsuite/helpers.py::test_progress_percentage_quiet",
"src/borg/testsuite/helpers.py::test_progress_endless",
"src/borg/testsuite/helpers.py::test_progress_endless_step",
"src/borg/testsuite/helpers.py::test_partial_format",
"src/borg/testsuite/helpers.py::test_chunk_file_wrapper",
"src/borg/testsuite/helpers.py::test_chunkit",
"src/borg/testsuite/helpers.py::test_clean_lines",
"src/borg/testsuite/helpers.py::test_format_line",
"src/borg/testsuite/helpers.py::test_format_line_erroneous",
"src/borg/testsuite/helpers.py::test_replace_placeholders",
"src/borg/testsuite/helpers.py::test_override_placeholders",
"src/borg/testsuite/helpers.py::test_swidth_slice",
"src/borg/testsuite/helpers.py::test_swidth_slice_mixed_characters",
"src/borg/testsuite/helpers.py::test_safe_timestamps",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_simple",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_not_found",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_bad_syntax[mismatched",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_bad_syntax[foo",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_bad_syntax[]",
"src/borg/testsuite/helpers.py::TestPopenWithErrorHandling::test_shell",
"src/borg/testsuite/helpers.py::test_dash_open",
"src/borg/testsuite/helpers.py::test_safe_unlink_is_safe",
"src/borg/testsuite/helpers.py::test_safe_unlink_is_safe_ENOSPC"
] | [] | BSD License | 12,551 | 1,459 | [
"src/borg/archiver.py",
"src/borg/crypto/key.py",
"src/borg/helpers.py"
] |
conan-io__conan-10960 | 55d7209c9c89c0ead9c887dbb0fe4ad6b66953ff | 2022-04-04 15:21:31 | fba42152d18038a848c85b749d3a87c8b7749210 | diff --git a/conan/tools/apple/xcodedeps.py b/conan/tools/apple/xcodedeps.py
index bae9fc71c..d31664465 100644
--- a/conan/tools/apple/xcodedeps.py
+++ b/conan/tools/apple/xcodedeps.py
@@ -146,11 +146,11 @@ class XcodeDeps(object):
'system_libs': " ".join("-l{}".format(sys_lib) for sys_lib in cpp_info.system_libs),
'frameworksdirs': " ".join('"{}"'.format(p) for p in cpp_info.frameworkdirs),
'frameworks': " ".join("-framework {}".format(framework) for framework in cpp_info.frameworks),
- 'definitions': " ".join(cpp_info.defines),
- 'c_compiler_flags': " ".join(cpp_info.cflags),
- 'cxx_compiler_flags': " ".join(cpp_info.cxxflags),
- 'linker_flags': " ".join(cpp_info.sharedlinkflags),
- 'exe_flags': " ".join(cpp_info.exelinkflags),
+ 'definitions': " ".join('"{}"'.format(p.replace('"', '\\"')) for p in cpp_info.defines),
+ 'c_compiler_flags': " ".join('"{}"'.format(p.replace('"', '\\"')) for p in cpp_info.cflags),
+ 'cxx_compiler_flags': " ".join('"{}"'.format(p.replace('"', '\\"')) for p in cpp_info.cxxflags),
+ 'linker_flags': " ".join('"{}"'.format(p.replace('"', '\\"')) for p in cpp_info.sharedlinkflags),
+ 'exe_flags': " ".join('"{}"'.format(p.replace('"', '\\"')) for p in cpp_info.exelinkflags),
'condition': _xcconfig_conditional(self._conanfile.settings)
}
formatted_template = Template(self._vars_xconfig).render(**fields)
diff --git a/conan/tools/cmake/cmake.py b/conan/tools/cmake/cmake.py
index 3d4def08b..e6d04073f 100644
--- a/conan/tools/cmake/cmake.py
+++ b/conan/tools/cmake/cmake.py
@@ -48,12 +48,11 @@ class CMake(object):
are passed to the command line, plus the ``--config Release`` for builds in multi-config
"""
- def __init__(self, conanfile, namespace=None):
+ def __init__(self, conanfile):
_validate_recipe(conanfile)
# Store a reference to useful data
self._conanfile = conanfile
- self._namespace = namespace
cmake_presets = load_cmake_presets(conanfile.generators_folder)
self._generator = cmake_presets["configurePresets"][0]["generator"]
diff --git a/conan/tools/cmake/toolchain/blocks.py b/conan/tools/cmake/toolchain/blocks.py
index 655fba520..a55a5c19c 100644
--- a/conan/tools/cmake/toolchain/blocks.py
+++ b/conan/tools/cmake/toolchain/blocks.py
@@ -157,7 +157,7 @@ class GLibCXXBlock(Block):
string(APPEND CONAN_CXX_FLAGS " {{ set_libcxx }}")
{% endif %}
{% if glibcxx %}
- add_definitions(-D_GLIBCXX_USE_CXX11_ABI={{ glibcxx }})
+ add_compile_definitions(_GLIBCXX_USE_CXX11_ABI={{ glibcxx }})
{% endif %}
""")
@@ -283,10 +283,10 @@ class AndroidSystemBlock(Block):
template = textwrap.dedent("""
# New toolchain things
- set(ANDROID_PLATFORM {{ ANDROID_PLATFORM }})
- set(ANDROID_STL {{ ANDROID_STL }})
- set(ANDROID_ABI {{ ANDROID_ABI }})
- include({{ ANDROID_NDK_PATH }}/build/cmake/android.toolchain.cmake)
+ set(ANDROID_PLATFORM {{ android_platform }})
+ set(ANDROID_STL {{ android_stl }})
+ set(ANDROID_ABI {{ android_abi }})
+ include({{ android_ndk_path }}/build/cmake/android.toolchain.cmake)
""")
def context(self):
@@ -309,10 +309,10 @@ class AndroidSystemBlock(Block):
android_ndk_path = android_ndk_path.replace("\\", "/")
ctxt_toolchain = {
- 'ANDROID_PLATFORM': self._conanfile.settings.os.api_level,
- 'ANDROID_ABI': android_abi,
- 'ANDROID_STL': libcxx_str,
- 'ANDROID_NDK_PATH': android_ndk_path,
+ 'android_platform': self._conanfile.settings.os.api_level,
+ 'android_abi': android_abi,
+ 'android_stl': libcxx_str,
+ 'android_ndk_path': android_ndk_path,
}
return ctxt_toolchain
@@ -320,13 +320,13 @@ class AndroidSystemBlock(Block):
class AppleSystemBlock(Block):
template = textwrap.dedent("""
# Set the architectures for which to build.
- set(CMAKE_OSX_ARCHITECTURES {{ CMAKE_OSX_ARCHITECTURES }} CACHE STRING "" FORCE)
+ set(CMAKE_OSX_ARCHITECTURES {{ cmake_osx_architectures }} CACHE STRING "" FORCE)
# Setting CMAKE_OSX_SYSROOT SDK, when using Xcode generator the name is enough
# but full path is necessary for others
- set(CMAKE_OSX_SYSROOT {{ CMAKE_OSX_SYSROOT }} CACHE STRING "" FORCE)
- {% if CMAKE_OSX_DEPLOYMENT_TARGET is defined %}
+ set(CMAKE_OSX_SYSROOT {{ cmake_osx_sysroot }} CACHE STRING "" FORCE)
+ {% if cmake_osx_deployment_target is defined %}
# Setting CMAKE_OSX_DEPLOYMENT_TARGET if "os.version" is defined by the used conan profile
- set(CMAKE_OSX_DEPLOYMENT_TARGET "{{ CMAKE_OSX_DEPLOYMENT_TARGET }}" CACHE STRING "")
+ set(CMAKE_OSX_DEPLOYMENT_TARGET "{{ cmake_osx_deployment_target }}" CACHE STRING "")
{% endif %}
""")
@@ -367,16 +367,16 @@ class AppleSystemBlock(Block):
ctxt_toolchain = {}
if host_sdk_name:
- ctxt_toolchain["CMAKE_OSX_SYSROOT"] = host_sdk_name
+ ctxt_toolchain["cmake_osx_sysroot"] = host_sdk_name
# this is used to initialize the OSX_ARCHITECTURES property on each target as it is created
if host_architecture:
- ctxt_toolchain["CMAKE_OSX_ARCHITECTURES"] = host_architecture
+ ctxt_toolchain["cmake_osx_architectures"] = host_architecture
if host_os_version:
# https://cmake.org/cmake/help/latest/variable/CMAKE_OSX_DEPLOYMENT_TARGET.html
# Despite the OSX part in the variable name(s) they apply also to other SDKs than
# macOS like iOS, tvOS, or watchOS.
- ctxt_toolchain["CMAKE_OSX_DEPLOYMENT_TARGET"] = host_os_version
+ ctxt_toolchain["cmake_osx_deployment_target"] = host_os_version
return ctxt_toolchain
@@ -545,7 +545,7 @@ class ExtraFlagsBlock(Block):
string(APPEND CONAN_EXE_LINKER_FLAGS "{% for exelinkflag in exelinkflags %} {{ exelinkflag }}{% endfor %}")
{% endif %}
{% if defines %}
- add_definitions({% for define in defines %} {{ define }}{% endfor %})
+ add_compile_definitions({% for define in defines %} {{ define }}{% endfor %})
{% endif %}
""")
@@ -561,7 +561,7 @@ class ExtraFlagsBlock(Block):
"cflags": cflags,
"sharedlinkflags": sharedlinkflags,
"exelinkflags": exelinkflags,
- "defines": ["-D{}".format(d) for d in defines]
+ "defines": defines
}
diff --git a/conan/tools/cmake/toolchain/toolchain.py b/conan/tools/cmake/toolchain/toolchain.py
index 723be7c0d..0c148c107 100644
--- a/conan/tools/cmake/toolchain/toolchain.py
+++ b/conan/tools/cmake/toolchain/toolchain.py
@@ -70,8 +70,8 @@ class CMakeToolchain(object):
{% if action=='set' %}
set({{ it }} {{ genexpr.str }} CACHE STRING
"Variable {{ it }} conan-toolchain defined")
- {% elif action=='add_definitions' %}
- add_definitions(-D{{ it }}={{ genexpr.str }})
+ {% elif action=='add_compile_definitions' %}
+ add_compile_definitions({{ it }}={{ genexpr.str }})
{% endif %}
{% endfor %}
{% endmacro %}
@@ -106,17 +106,15 @@ class CMakeToolchain(object):
# Preprocessor definitions
{% for it, value in preprocessor_definitions.items() %}
- # add_compile_definitions only works in cmake >= 3.12
- add_definitions(-D{{ it }}={{ value }})
+ add_compile_definitions({{ it }}={{ value }})
{% endfor %}
# Preprocessor definitions per configuration
- {{ iterate_configs(preprocessor_definitions_config, action='add_definitions') }}
+ {{ iterate_configs(preprocessor_definitions_config, action='add_compile_definitions') }}
""")
- def __init__(self, conanfile, generator=None, namespace=None):
+ def __init__(self, conanfile, generator=None):
self._conanfile = conanfile
self.generator = self._get_generator(generator)
- self._namespace = namespace
self.variables = Variables()
self.preprocessor_definitions = Variables()
diff --git a/conan/tools/scm/git.py b/conan/tools/scm/git.py
index 2e6b24e0f..01a90764b 100644
--- a/conan/tools/scm/git.py
+++ b/conan/tools/scm/git.py
@@ -19,7 +19,10 @@ class Git(object):
try:
# commit = self._run("rev-parse HEAD") For the whole repo
# This rev-list knows to capture the last commit for the folder
- commit = self._run('rev-list HEAD -n 1 -- "{}"'.format(self.folder))
+ # --full-history is needed to not avoid wrong commits:
+ # https://github.com/conan-io/conan/issues/10971
+ # https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt-Defaultmode
+ commit = self._run('rev-list HEAD -n 1 --full-history -- "{}"'.format(self.folder))
return commit
except Exception as e:
raise ConanException("Unable to get git commit in '%s': %s" % (self.folder, str(e)))
diff --git a/conans/client/generators/json_generator.py b/conans/client/generators/json_generator.py
index fa7e0e174..ac2862053 100644
--- a/conans/client/generators/json_generator.py
+++ b/conans/client/generators/json_generator.py
@@ -3,26 +3,6 @@ import json
from conans.model import Generator
-def serialize_cpp_info(cpp_info):
- keys = [
- "version",
- "description",
- "rootpath",
- "sysroot",
- "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths",
- "libs",
- "system_libs",
- "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
- "frameworks", "framework_paths", "names", "filenames",
- "build_modules", "build_modules_paths"
- ]
- res = {}
- for key in keys:
- res[key] = getattr(cpp_info, key)
- res["cppflags"] = cpp_info.cxxflags # Backwards compatibility
- return res
-
-
def serialize_user_info(user_info):
res = {}
for key, value in user_info.items():
@@ -51,10 +31,10 @@ class JsonGenerator(Generator):
def get_dependencies_info(self):
res = []
for depname, cpp_info in self.deps_build_info.dependencies:
- serialized_info = serialize_cpp_info(cpp_info)
- serialized_info["name"] = depname
+ serialized_info = self.serialize_cpp_info(depname, cpp_info)
for cfg, cfg_cpp_info in cpp_info.configs.items():
- serialized_info.setdefault("configs", {})[cfg] = serialize_cpp_info(cfg_cpp_info)
+ serialized_info.setdefault("configs", {})[cfg] = self.serialize_cpp_info(depname,
+ cfg_cpp_info)
res.append(serialized_info)
return res
@@ -71,3 +51,31 @@ class JsonGenerator(Generator):
for key, value in self.conanfile.options[req].items():
options[req][key] = value
return options
+
+ def serialize_cpp_info(self, depname, cpp_info):
+ keys = [
+ "version",
+ "description",
+ "rootpath",
+ "sysroot",
+ "include_paths", "lib_paths", "bin_paths", "build_paths", "res_paths",
+ "libs",
+ "system_libs",
+ "defines", "cflags", "cxxflags", "sharedlinkflags", "exelinkflags",
+ "frameworks", "framework_paths", "names", "filenames",
+ "build_modules", "build_modules_paths"
+ ]
+ res = {}
+ for key in keys:
+ res[key] = getattr(cpp_info, key)
+ res["cppflags"] = cpp_info.cxxflags # Backwards compatibility
+ res["name"] = depname
+
+ # FIXME: trick for NewCppInfo objects when declared layout
+ try:
+ if cpp_info.version is None:
+ res["version"] = self.conanfile.dependencies.get(depname).ref.version
+ except Exception:
+ pass
+
+ return res
diff --git a/conans/client/subsystems.py b/conans/client/subsystems.py
index d75a9ba62..1ad488677 100644
--- a/conans/client/subsystems.py
+++ b/conans/client/subsystems.py
@@ -13,17 +13,20 @@ WSL = 'wsl' # Windows Subsystem for Linux
SFU = 'sfu' # Windows Services for UNIX
-def run_in_windows_bash(conanfile, command, cwd=None, env=None):
+def run_in_windows_bash(conanfile, command, cwd=None, env="conanbuild"):
from conan.tools.env import Environment
from conan.tools.env.environment import environment_wrap_command
""" Will run a unix command inside a bash terminal It requires to have MSYS2, CYGWIN, or WSL"""
+ env_win = []
+ env_shell = []
if env:
- # Passing env invalidates the conanfile.environment_scripts
- env_win = [env] if not isinstance(env, list) else env
- env_shell = []
- else:
- env_shell = ["conanbuild.sh"]
- env_win = ["conanbuild.bat"]
+ if env == "conanbuild":
+ env_shell = ["conanbuild.sh"]
+ env_win = ["conanbuild.bat"]
+ else:
+ # Passing env invalidates the conanfile.environment_scripts
+ env_win = [env] if not isinstance(env, list) else env
+ env_shell = []
subsystem = conanfile.conf.get("tools.microsoft.bash:subsystem")
shell_path = conanfile.conf.get("tools.microsoft.bash:path")
diff --git a/conans/model/conan_file.py b/conans/model/conan_file.py
index bee72feb1..b33036f8f 100644
--- a/conans/model/conan_file.py
+++ b/conans/model/conan_file.py
@@ -376,7 +376,7 @@ class ConanFile(object):
"""
def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True,
- ignore_errors=False, run_environment=False, with_login=True, env=None):
+ ignore_errors=False, run_environment=False, with_login=True, env="conanbuild"):
# NOTE: "self.win_bash" is the new parameter "win_bash" for Conan 2.0
def _run(cmd, _env):
@@ -388,10 +388,11 @@ class ConanFile(object):
elif self.win_bash: # New, Conan 2.0
from conans.client.subsystems import run_in_windows_bash
return run_in_windows_bash(self, command=cmd, cwd=cwd, env=_env)
- if _env is None:
- _env = "conanbuild"
from conan.tools.env.environment import environment_wrap_command
- wrapped_cmd = environment_wrap_command(_env, cmd, cwd=self.generators_folder)
+ if env:
+ wrapped_cmd = environment_wrap_command(_env, cmd, cwd=self.generators_folder)
+ else:
+ wrapped_cmd = cmd
return self._conan_runner(wrapped_cmd, output, os.path.abspath(RUN_LOG_NAME), cwd)
if run_environment:
diff --git a/conans/model/info.py b/conans/model/info.py
index d1a98c6a3..80bd185a2 100644
--- a/conans/model/info.py
+++ b/conans/model/info.py
@@ -349,6 +349,9 @@ class PythonRequireInfo(object):
self._channel = self._ref.channel
self._revision = self._ref.revision
+ def unrelated_mode(self):
+ self._name = self._version = self._user = self._channel = self._revision = None
+
class PythonRequiresInfo(object):
| [bug] version is not set correctly when using layout
When a layout() method is used, the recipe version is not set correctly by the json generator; it seems the version is not fetched from the package metadata when running the conan install command.
### Environment Details
* Operating System+version: macOS
* Compiler+version: apple-clang 12.0
* Conan version: 1.47.0
* Python version: 3.9
### Steps to reproduce
* create a conan demo project using `conan new demo/1.0.0 --template=cmake_lib`
* create a local conan package `conan create .`
* generate deps using json generator `conan install demo/1.0.0@ -g json`
* inspect conanbuildinfo.json: version is set to null, but it should be 1.0.0
* remove the layout method from the conanfile.py and try again
* now version is set correctly
By the way, the same issue seems to affect the description attribute, and possibly other attributes as well.

| conan-io/conan | diff --git a/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py b/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
index 818a5cfd5..2e514ebca 100644
--- a/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
+++ b/conans/test/functional/toolchains/apple/test_xcodedeps_build_configs.py
@@ -387,3 +387,36 @@ def test_xcodedeps_dashes_names_and_arch():
assert os.path.exists(os.path.join(client.current_folder,
"conan_hello_dashes_vars_release_arm64.xcconfig"))
client.run_command("xcodebuild -project app.xcodeproj -xcconfig conandeps.xcconfig -arch arm64")
+
+
[email protected](platform.system() != "Darwin", reason="Only for MacOS")
[email protected]_xcodebuild
+def test_xcodedeps_definitions_escape():
+ client = TestClient(path_with_spaces=False)
+ conanfile = textwrap.dedent('''
+ from conans import ConanFile
+
+ class HelloLib(ConanFile):
+ def package_info(self):
+ self.cpp_info.defines.append("USER_CONFIG=\\"user_config.h\\"")
+ self.cpp_info.defines.append('OTHER="other.h"')
+ ''')
+ client.save({"conanfile.py": conanfile})
+ client.run("export . hello/1.0@")
+ client.save({"conanfile.txt": "[requires]\nhello/1.0\n"}, clean_first=True)
+ main = textwrap.dedent("""
+ #include <stdio.h>
+ #define STR(x) #x
+ #define SHOW_DEFINE(x) printf("%s=%s", #x, STR(x))
+ int main(int argc, char *argv[]) {
+ SHOW_DEFINE(USER_CONFIG);
+ SHOW_DEFINE(OTHER);
+ return 0;
+ }
+ """)
+ create_xcode_project(client, "app", main)
+ client.run("install . --build=missing -g XcodeDeps")
+ client.run_command("xcodebuild -project app.xcodeproj -xcconfig conandeps.xcconfig")
+ client.run_command("build/Release/app")
+ assert 'USER_CONFIG="user_config.h"' in client.out
+ assert 'OTHER="other.h"' in client.out
diff --git a/conans/test/integration/environment/test_env.py b/conans/test/integration/environment/test_env.py
index 92c0bbfb3..6d8cbc991 100644
--- a/conans/test/integration/environment/test_env.py
+++ b/conans/test/integration/environment/test_env.py
@@ -545,3 +545,47 @@ def test_deactivate_location():
assert os.path.exists(os.path.join(client.current_folder, "myfolder",
"deactivate_conanbuildenv-release-x86_64{}".format(script_ext)))
+
+
[email protected](platform.system() == "Windows", reason="Requires sh")
+def test_skip_virtualbuildenv_run():
+ # Build require
+ conanfile = textwrap.dedent(r"""
+ from conan import ConanFile
+ class Pkg(ConanFile):
+ def package_info(self):
+ self.buildenv_info.define("FOO", "BAR")
+ """)
+ client = TestClient()
+ client.save({"pkg.py": conanfile})
+ # client.run("create pkg.py --name pkg --version 1.0")
+ client.run("create pkg.py pkg/1.0@ -pr:h=default -pr:b=default")
+
+ # consumer
+ conanfile = textwrap.dedent(r"""
+ import os
+ from conan import ConanFile
+ class Consumer(ConanFile):
+ tool_requires = "pkg/1.0"
+ exports_sources = "my_script.sh"
+ # This can be removed at Conan 2
+ generators = "VirtualBuildEnv"
+ def build(self):
+ path = os.path.join(self.source_folder, "my_script.sh")
+ os.chmod(path, 0o777)
+ self.run("'{}'".format(path))
+ """)
+ my_script = 'echo FOO is $FOO'
+ client.save({"conanfile.py": conanfile, "my_script.sh": my_script})
+ # client.run("create . --name consumer --version 1.0")
+ client.run("create . consumer/1.0@ -pr:h=default -pr:b=default")
+ assert "FOO is BAR" in client.out
+
+ # If we pass env=None no "conanbuild" is applied
+ # self.run("'{}'".format(path), env=None)
+ conanfile = conanfile.replace(".format(path))",
+ ".format(path), env=None)")
+ client.save({"conanfile.py": conanfile})
+ # client.run("create . --name consumer --version 1.0")
+ client.run("create . consumer/1.0@ -pr:h=default -pr:b=default")
+ assert "FOO is BAR" not in client.out
diff --git a/conans/test/integration/generators/json_test.py b/conans/test/integration/generators/json_test.py
index dbdcf5bd7..17e835cc7 100644
--- a/conans/test/integration/generators/json_test.py
+++ b/conans/test/integration/generators/json_test.py
@@ -13,6 +13,9 @@ class JsonTest(unittest.TestCase):
class HelloConan(ConanFile):
exports_sources = "*.h"
+ description = "foo"
+ def layout(self):
+ pass
def package(self):
self.copy("*.h", dst="include")
def package_info(self):
@@ -26,7 +29,8 @@ class HelloConan(ConanFile):
client.run("install Hello/0.1@lasote/testing -g json")
conan_json = client.load("conanbuildinfo.json")
data = json.loads(conan_json)
-
+ self.assertEqual(data["dependencies"][0]["version"], "0.1")
+ self.assertIsNone(data["dependencies"][0]["description"])
self.assertEqual(data["deps_env_info"]["MY_ENV_VAR"], "foo")
self.assertEqual(data["deps_user_info"]["Hello"]["my_var"], "my_value")
@@ -103,9 +107,6 @@ class HelloConan(ConanFile):
self.assertEqual(deps_info_release["libs"], ["Hello"])
# FIXME: There are _null_ nodes
- self.assertEqual(deps_info_debug["version"], None)
- self.assertEqual(deps_info_release["version"], None)
-
self.assertEqual(deps_info_debug["description"], None)
self.assertEqual(deps_info_release["description"], None)
diff --git a/conans/test/integration/package_id/python_requires_package_id_test.py b/conans/test/integration/package_id/python_requires_package_id_test.py
index bb9a67fd7..716e7b5c4 100644
--- a/conans/test/integration/package_id/python_requires_package_id_test.py
+++ b/conans/test/integration/package_id/python_requires_package_id_test.py
@@ -50,6 +50,19 @@ class PythonRequiresPackageIDTest(unittest.TestCase):
self.assertIn("tool/1.1.2", self.client2.out)
self.assertIn("pkg/0.1:387c1c797a011d426ecb25a1e01b28251e443ec8 - Build", self.client2.out)
+ def test_unrelated_conf(self):
+ # change the policy in conan.conf
+ self.client2.run("config set general.default_python_requires_id_mode=unrelated_mode")
+ self.client2.run("create . pkg/0.1@")
+ self.assertIn("tool/1.1.1", self.client2.out)
+ self.assertIn("pkg/0.1:c941ae50e2daf4a118c393591cfef6a55cd1cfad - Build", self.client2.out)
+
+ # with any change the package id doesn't change
+ self.client.run("export . tool/1.1.2@")
+ self.client2.run("create . pkg/0.1@ --build missing")
+ self.assertIn("tool/1.1.2", self.client2.out)
+ self.assertIn("pkg/0.1:c941ae50e2daf4a118c393591cfef6a55cd1cfad - Cache", self.client2.out)
+
def test_change_mode_package_id(self):
# change the policy in package_id
conanfile = textwrap.dedent("""
diff --git a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
index 6dda9c683..dd62ef3dc 100644
--- a/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
+++ b/conans/test/integration/toolchains/cmake/test_cmaketoolchain.py
@@ -296,7 +296,7 @@ def test_apple_vars_overwrite_user_conf():
assert "CMAKE_SYSTEM_PROCESSOR x86_64" in toolchain
assert "CMAKE_SYSTEM_PROCESSOR armv8" not in toolchain
-
+
def test_extra_flags_via_conf():
profile = textwrap.dedent("""
[settings]
@@ -327,4 +327,4 @@ def test_extra_flags_via_conf():
assert 'string(APPEND CONAN_C_FLAGS " --flag3 --flag4")' in toolchain
assert 'string(APPEND CONAN_SHARED_LINKER_FLAGS " --flag5 --flag6")' in toolchain
assert 'string(APPEND CONAN_EXE_LINKER_FLAGS " --flag7 --flag8")' in toolchain
- assert 'add_definitions( -DD1 -DD2)' in toolchain
+ assert 'add_compile_definitions( D1 D2)' in toolchain
diff --git a/conans/test/integration/toolchains/test_toolchain_namespaces.py b/conans/test/integration/toolchains/test_toolchain_namespaces.py
index 57a40afb8..b98f8b1aa 100644
--- a/conans/test/integration/toolchains/test_toolchain_namespaces.py
+++ b/conans/test/integration/toolchains/test_toolchain_namespaces.py
@@ -2,39 +2,10 @@ import os
import textwrap
from conan.tools import CONAN_TOOLCHAIN_ARGS_FILE
-from conan.tools.cmake.presets import load_cmake_presets
from conan.tools.files.files import load_toolchain_args
from conans.test.utils.tools import TestClient
-def test_cmake_namespace():
- client = TestClient()
- namespace = "somename"
- conanfile = textwrap.dedent("""
- from conans import ConanFile
- from conan.tools.cmake import CMakeToolchain, CMake
-
- class Conan(ConanFile):
- settings = "os", "arch", "compiler", "build_type"
- def generate(self):
- cmake = CMakeToolchain(self, namespace='{0}')
- cmake.generate()
- def build(self):
- cmake = CMake(self, namespace='{0}')
- self.output.info(cmake._generator)
- self.output.info(cmake._toolchain_file)
- """.format(namespace))
-
- client.save({"conanfile.py": conanfile})
- client.run("install . ")
- presets = load_cmake_presets(client.current_folder)
- toolchain_file = presets["configurePresets"][0]["toolchainFile"]
- generator = presets["configurePresets"][0]["generator"]
- client.run("build . ")
- assert generator in client.out
- assert toolchain_file in client.out
-
-
def test_bazel_namespace():
client = TestClient()
namespace = "somename"
diff --git a/conans/test/unittests/tools/cmake/test_cmaketoolchain.py b/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
index da9d29b66..36c2b5e20 100644
--- a/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
+++ b/conans/test/unittests/tools/cmake/test_cmaketoolchain.py
@@ -386,7 +386,7 @@ def test_libcxx_abi_flag():
toolchain = CMakeToolchain(c)
content = toolchain.content
- assert '-D_GLIBCXX_USE_CXX11_ABI=0' in content
+ assert '_GLIBCXX_USE_CXX11_ABI=0' in content
c.settings.compiler.libcxx = "libstdc++11"
toolchain = CMakeToolchain(c)
content = toolchain.content
@@ -395,13 +395,13 @@ def test_libcxx_abi_flag():
# recipe workaround for older distros
toolchain.blocks["libcxx"].values["glibcxx"] = "1"
content = toolchain.content
- assert '-D_GLIBCXX_USE_CXX11_ABI=1' in content
+ assert '_GLIBCXX_USE_CXX11_ABI=1' in content
# but maybe the conf is better
c.conf["tools.gnu:define_libcxx11_abi"] = True
toolchain = CMakeToolchain(c)
content = toolchain.content
- assert '-D_GLIBCXX_USE_CXX11_ABI=1' in content
+ assert '_GLIBCXX_USE_CXX11_ABI=1' in content
@pytest.mark.parametrize("os,os_sdk,arch,expected_sdk", [
diff --git a/conans/test/unittests/tools/scm/__init__.py b/conans/test/unittests/tools/scm/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/conans/test/unittests/tools/scm/test_git_get_commit.py b/conans/test/unittests/tools/scm/test_git_get_commit.py
new file mode 100644
index 000000000..be652adc1
--- /dev/null
+++ b/conans/test/unittests/tools/scm/test_git_get_commit.py
@@ -0,0 +1,99 @@
+import os
+
+from conan.tools.scm import Git
+from conans.test.utils.mocks import MockConanfile
+from conans.test.utils.tools import TestClient
+
+
+def test_change_branch_in_root_commit():
+ """
+ https://github.com/conan-io/conan/issues/10971#issuecomment-1089316912
+ """
+ c = TestClient()
+ conanfile = MockConanfile({})
+ c.save({"root.txt": "", "subfolder/subfolder.txt": ""})
+ c.run_command("git init .")
+ c.run_command('git config user.name myname')
+ c.run_command('git config user.email [email protected]')
+ c.run_command("git add .")
+ c.run_command('git commit -m "initial commit"')
+ c.run_command("git checkout -b change_branch")
+ c.save({"subfolder/subfolder.txt": "CHANGED"})
+ c.run_command("git add .")
+ c.run_command('git commit -m "second commit"')
+ c.run_command("git checkout master")
+ c.run_command('git merge --no-ff change_branch -m "Merge branch"')
+
+ git = Git(conanfile, folder=c.current_folder)
+ commit_conan = git.get_commit()
+
+ c.run_command("git rev-parse HEAD")
+ commit_real = str(c.out).splitlines()[0]
+ assert commit_conan == commit_real
+
+
+def test_multi_folder_repo():
+ c = TestClient()
+ conanfile = MockConanfile({})
+ c.save({"lib_a/conanfile.py": ""})
+ c.run_command("git init .")
+ c.run_command('git config user.name myname')
+ c.run_command('git config user.email [email protected]')
+ c.run_command("git add .")
+ c.run_command('git commit -m "lib_a commit"')
+ c.save({"lib_b/conanfile.py": ""})
+ c.run_command("git add .")
+ c.run_command('git commit -m "lib_b commit"')
+ c.save({"lib_c/conanfile.py": ""})
+ c.run_command("git add .")
+ c.run_command('git commit -m "lib_c commit"')
+ c.save({"root_change": ""})
+ c.run_command("git add .")
+ c.run_command('git commit -m "root change"')
+
+ # Git object for lib_a
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_a"))
+ commit_libA = git.get_commit()
+
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_b"))
+ commit_libB = git.get_commit()
+
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_c"))
+ commit_libC = git.get_commit()
+
+ git = Git(conanfile, folder=c.current_folder)
+ commit_root = git.get_commit()
+
+ # All different
+ assert len({commit_libA, commit_libB, commit_libC, commit_root}) == 4
+
+ c.run_command("git rev-parse HEAD")
+ commit_real = str(c.out).splitlines()[0]
+ assert commit_root == commit_real
+
+ # New commit in A
+ c.save({"lib_a/conanfile.py": "CHANGED"})
+ c.run_command("git add .")
+ c.run_command('git commit -m "lib_a commit2"')
+
+ # Git object for lib_a
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_a"))
+ new_commit_libA = git.get_commit()
+
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_b"))
+ new_commit_libB = git.get_commit()
+
+ git = Git(conanfile, folder=os.path.join(c.current_folder, "lib_c"))
+ new_commit_libC = git.get_commit()
+
+ git = Git(conanfile, folder=c.current_folder)
+ new_commit_root = git.get_commit()
+
+ assert new_commit_libA != commit_libA
+ assert new_commit_libB == commit_libB
+ assert new_commit_libC == commit_libC
+ assert new_commit_root != commit_root
+
+ c.run_command("git rev-parse HEAD")
+ commit_real = str(c.out).splitlines()[0]
+ assert new_commit_root == commit_real
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 9
} | 1.47 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.8",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
beautifulsoup4==4.13.3
bottle==0.12.25
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
-e git+https://github.com/conan-io/conan.git@55d7209c9c89c0ead9c887dbb0fe4ad6b66953ff#egg=conan
distro==1.6.0
execnet==2.1.1
fasteners==0.19
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==2.1.5
mock==1.3.0
node-semver==0.6.1
packaging==24.2
parameterized==0.9.0
patch-ng==1.17.4
pbr==6.1.1
pluggy==1.5.0
pluginbase==1.0.1
py==1.11.0
Pygments==2.19.1
PyJWT==1.7.1
pytest==6.2.5
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
PyYAML==5.4.1
requests==2.32.3
six==1.16.0
soupsieve==2.6
toml==0.10.2
tqdm==4.67.1
typing_extensions==4.13.0
urllib3==1.26.20
waitress==3.0.0
WebOb==1.8.9
WebTest==2.0.35
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- beautifulsoup4==4.13.3
- bottle==0.12.25
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- distro==1.6.0
- execnet==2.1.1
- fasteners==0.19
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==2.1.5
- mock==1.3.0
- node-semver==0.6.1
- packaging==24.2
- parameterized==0.9.0
- patch-ng==1.17.4
- pbr==6.1.1
- pluggy==1.5.0
- pluginbase==1.0.1
- py==1.11.0
- pygments==2.19.1
- pyjwt==1.7.1
- pytest==6.2.5
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- pyyaml==5.4.1
- requests==2.32.3
- six==1.16.0
- soupsieve==2.6
- toml==0.10.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- waitress==3.0.0
- webob==1.8.9
- webtest==2.0.35
prefix: /opt/conda/envs/conan
| [
"conans/test/integration/environment/test_env.py::test_skip_virtualbuildenv_run",
"conans/test/integration/generators/json_test.py::JsonTest::test_generate_json_info",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_unrelated_conf",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_extra_flags_via_conf",
"conans/test/unittests/tools/scm/test_git_get_commit.py::test_change_branch_in_root_commit"
] | [] | [
"conans/test/integration/environment/test_env.py::test_complete",
"conans/test/integration/environment/test_env.py::test_profile_included_multiple",
"conans/test/integration/environment/test_env.py::test_profile_buildenv",
"conans/test/integration/environment/test_env.py::test_transitive_order",
"conans/test/integration/environment/test_env.py::test_buildenv_from_requires",
"conans/test/integration/environment/test_env.py::test_diamond_repeated",
"conans/test/integration/environment/test_env.py::test_environment_scripts_generated_envvars",
"conans/test/integration/environment/test_env.py::test_multiple_deactivate",
"conans/test/integration/environment/test_env.py::test_profile_build_env_spaces",
"conans/test/integration/environment/test_env.py::test_deactivate_location",
"conans/test/integration/generators/json_test.py::JsonTest::test_generate_json_filenames",
"conans/test/integration/generators/json_test.py::JsonTest::test_generate_json_info_settings",
"conans/test/integration/generators/json_test.py::JsonTest::test_multiconfig",
"conans/test/integration/generators/json_test.py::JsonTest::test_system_libs",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_change_mode_conf",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_change_mode_package_id",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresPackageIDTest::test_default",
"conans/test/integration/package_id/python_requires_package_id_test.py::PythonRequiresForBuildRequiresPackageIDTest::test",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build_user_toolchain",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_no_cross_build",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_arch",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_no_cross_build_arch",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_cross_build_conf",
"conans/test/integration/toolchains/cmake/test_cmaketoolchain.py::test_find_builddirs",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_bazel_namespace",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_autotools_namespace",
"conans/test/integration/toolchains/test_toolchain_namespaces.py::test_multiple_toolchains_one_recipe",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_cmake_toolchain",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_remove",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_template_remove",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_template_change",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_change",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_update",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_context_replace",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_replace_block",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_add_new_block",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_user_toolchain",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_osx_deployment_target",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_toolset",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_older_msvc_toolset",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_msvc_xp_toolsets",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_no_fpic_when_not_an_option",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_shared_true[True]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_shared_true[False]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_when_not_shared",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_no_fpic_on_windows",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_disabled",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_fpic_enabled",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_libcxx_abi_flag",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[Macos-None-x86_64-macosx]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[Macos-None-armv7-macosx]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[iOS-iphonesimulator-armv8-iphonesimulator]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot[watchOS-watchsimulator-armv8-watchsimulator]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[iOS-None-x86_64-]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[watchOS-None-armv8-]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_apple_cmake_osx_sysroot_sdk_mandatory[tvOS-None-x86_64-]",
"conans/test/unittests/tools/cmake/test_cmaketoolchain.py::test_variables_types",
"conans/test/unittests/tools/scm/test_git_get_commit.py::test_multi_folder_repo"
] | [] | MIT License | 12,560 | 4,170 | [
"conan/tools/apple/xcodedeps.py",
"conan/tools/cmake/cmake.py",
"conan/tools/cmake/toolchain/blocks.py",
"conan/tools/cmake/toolchain/toolchain.py",
"conan/tools/scm/git.py",
"conans/client/generators/json_generator.py",
"conans/client/subsystems.py",
"conans/model/conan_file.py",
"conans/model/info.py"
] |
|
google__cloud-forensics-utils-444 | 4a2f66c90b6be28f307c13559a2ff4d541b7c5b3 | 2022-04-04 23:54:58 | 4a2f66c90b6be28f307c13559a2ff4d541b7c5b3 | codecov-commenter: # [Codecov](https://codecov.io/gh/google/cloud-forensics-utils/pull/444?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google) Report
> :exclamation: No coverage uploaded for pull request base (`main@4a2f66c`). [Click here to learn what that means](https://docs.codecov.io/docs/error-reference?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google#section-missing-base-commit).
> The diff coverage is `n/a`.
```diff
@@ Coverage Diff @@
## main #444 +/- ##
=======================================
Coverage ? 59.14%
=======================================
Files ? 49
Lines ? 4031
Branches ? 0
=======================================
Hits ? 2384
Misses ? 1647
Partials ? 0
```
| Flag | Coverage Δ | |
|---|---|---|
| nosetests | `59.14% <0.00%> (?)` | |
Flags with carried forward coverage won't be shown. [Click here](https://docs.codecov.io/docs/carryforward-flags?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google#carryforward-flags-in-the-pull-request-comment) to find out more.
------
[Continue to review full report at Codecov](https://codecov.io/gh/google/cloud-forensics-utils/pull/444?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/google/cloud-forensics-utils/pull/444?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google). Last update [4a2f66c...7f037c4](https://codecov.io/gh/google/cloud-forensics-utils/pull/444?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=google).
| diff --git a/libcloudforensics/providers/gcp/internal/compute.py b/libcloudforensics/providers/gcp/internal/compute.py
index 5103574..0c16d10 100644
--- a/libcloudforensics/providers/gcp/internal/compute.py
+++ b/libcloudforensics/providers/gcp/internal/compute.py
@@ -46,6 +46,10 @@ E2_STANDARD_CPU_CORES = [2, 4, 8, 16, 32]
# Numerical policy_level value for non-hierarchical FW rules
NON_HIERARCHICAL_FW_POLICY_LEVEL = 999
+# Will only matches IDs as names can't start with a number
+# https://cloud.google.com/compute/docs/naming-resources#resource-name-format
+RESOURCE_ID_REGEX = r'^\d{19}$'
+
class GoogleCloudCompute(common.GoogleCloudComputeClient):
"""Class representing all Google Cloud Compute objects in a project.
@@ -140,11 +144,13 @@ class GoogleCloudCompute(common.GoogleCloudComputeClient):
for instance in response['items'][zone]['instances']:
_, zone = instance['zone'].rsplit('/', 1)
name = instance['name']
+ resource_id = instance['id']
deletion_protection = instance.get('deletionProtection', False)
instances[name] = GoogleComputeInstance(
self.project_id,
zone,
name,
+ resource_id=resource_id,
labels=instance.get('labels'),
deletion_protection=deletion_protection)
except KeyError:
@@ -324,7 +330,8 @@ class GoogleCloudCompute(common.GoogleCloudComputeClient):
"""Get instance from project.
Args:
- instance_name (str): The instance name.
+ instance_name (str): The instance identifier, can be either an instance
+ name or ID.
Returns:
GoogleComputeInstance: A Google Compute Instance object.
@@ -334,7 +341,14 @@ class GoogleCloudCompute(common.GoogleCloudComputeClient):
"""
instances = self.Instances()
- instance = instances.get(instance_name)
+
+ if re.match(RESOURCE_ID_REGEX, instance_name):
+ id_match = [instance for instance in instances.values()
+ if instance.resource_id == instance_name]
+ instance = id_match.pop() if id_match else None
+ else:
+ instance = instances.get(instance_name)
+
if not instance:
raise errors.ResourceNotFoundError(
'Instance {0:s} was not found in project {1:s}'.format(
diff --git a/libcloudforensics/providers/gcp/internal/compute_base_resource.py b/libcloudforensics/providers/gcp/internal/compute_base_resource.py
index 4780961..a206b5f 100644
--- a/libcloudforensics/providers/gcp/internal/compute_base_resource.py
+++ b/libcloudforensics/providers/gcp/internal/compute_base_resource.py
@@ -29,6 +29,7 @@ class GoogleComputeBaseResource(common.GoogleCloudComputeClient):
project_id (str): Google Cloud project ID.
zone (str): What zone the resource is in.
name (str): Name of the resource.
+ resource_id (str): The ID number of the resource.
labels (Dict): Dictionary of labels for the resource, if existing.
deletion_protection (bool): True if the resource has deletionProtection
enabled.
@@ -38,6 +39,7 @@ class GoogleComputeBaseResource(common.GoogleCloudComputeClient):
project_id: str,
zone: str,
name: str,
+ resource_id: Optional[str] = None,
labels: Optional[Dict[str, Any]] = None,
deletion_protection: bool = False,
region: Optional[str] = None) -> None:
@@ -47,6 +49,7 @@ class GoogleComputeBaseResource(common.GoogleCloudComputeClient):
project_id: Google Cloud project ID.
zone: What zone the resource is in.
name: Name of the resource.
+ resource_id: The ID number of the resource.
labels: Dictionary of labels for the resource, if existing.
deletion_protection: True if the resource has deletionProtection
enabled.
@@ -56,6 +59,7 @@ class GoogleComputeBaseResource(common.GoogleCloudComputeClient):
self.deletion_protection = deletion_protection
self.zone = zone
self.name = name
+ self.resource_id = resource_id
self.labels = labels
self._data = {} # type: Dict[str, Any]
self.project_id = project_id # type: str
| Add support for specifying resources by ID as well as name
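A rough usage sketch of the requested behaviour, using the client classes already exercised in this repository's tests (the project, zone, name, and ID values below are placeholders):

```python
# Illustrative only; project, zone, name, and ID values are placeholders.
from libcloudforensics.providers.gcp.internal import project as gcp_project

proj = gcp_project.GoogleCloudProject('my-project', 'us-central1-a')

# Lookup by name is what works today.
vm = proj.compute.GetInstance('my-instance')

# Requested: the same call should also accept the numeric resource ID.
vm = proj.compute.GetInstance('0123456789012345678')
```
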
Add support for accessing resources by ID as well as name, for example instance ID for GetInstance. | google/cloud-forensics-utils | diff --git a/tests/providers/gcp/gcp_mocks.py b/tests/providers/gcp/gcp_mocks.py
index 056f016..912c5fb 100644
--- a/tests/providers/gcp/gcp_mocks.py
+++ b/tests/providers/gcp/gcp_mocks.py
@@ -44,7 +44,8 @@ FAKE_SOURCE_PROJECT = gcp_project.GoogleCloudProject(
'fake-source-project', 'fake-zone')
FAKE_INSTANCE = compute.GoogleComputeInstance(
- FAKE_SOURCE_PROJECT.project_id, 'fake-zone', 'fake-instance')
+ FAKE_SOURCE_PROJECT.project_id, 'fake-zone', 'fake-instance',
+ resource_id='0123456789012345678')
FAKE_DISK = compute.GoogleComputeDisk(
FAKE_SOURCE_PROJECT.project_id, 'fake-zone', 'fake-disk')
@@ -101,7 +102,8 @@ MOCK_INSTANCES_AGGREGATED = {
0: {
'instances': [{
'name': FAKE_INSTANCE.name,
- 'zone': '/' + FAKE_INSTANCE.zone
+ 'zone': '/' + FAKE_INSTANCE.zone,
+ 'id': FAKE_INSTANCE.resource_id
}]
}
}
diff --git a/tests/providers/gcp/internal/test_compute.py b/tests/providers/gcp/internal/test_compute.py
index 0841f49..b9dbd82 100644
--- a/tests/providers/gcp/internal/test_compute.py
+++ b/tests/providers/gcp/internal/test_compute.py
@@ -91,6 +91,8 @@ class GoogleCloudComputeTest(unittest.TestCase):
# pylint: enable=protected-access
with self.assertRaises(errors.ResourceNotFoundError):
gcp_mocks.FAKE_SOURCE_PROJECT.compute.GetInstance('non-existent-instance')
+ id_found_instance = gcp_mocks.FAKE_SOURCE_PROJECT.compute.GetInstance(gcp_mocks.FAKE_INSTANCE.resource_id)
+ self.assertEqual(id_found_instance, gcp_mocks.FAKE_INSTANCE)
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.ListDisks')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 2
} | 20220321 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y python3-pip"
],
"python": "3.7",
"reqs_path": [
"requirements.txt",
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | adal==1.2.7
alabaster==0.7.13
astroid==2.15.8
azure-common==1.1.28
azure-core==1.30.1
azure-identity==1.15.0
azure-mgmt-compute==23.1.0
azure-mgmt-core==1.4.0
azure-mgmt-monitor==6.0.2
azure-mgmt-network==25.2.0
azure-mgmt-resource==23.0.1
azure-mgmt-storage==21.1.0
azure-storage-blob==12.19.1
Babel==2.14.0
boto3==1.33.13
botocore==1.33.13
cachetools==4.2.4
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
charset-normalizer==3.4.1
cov-core==1.15.0
coverage==7.2.7
cryptography==44.0.2
dill==0.3.7
docutils==0.19
durationpy==0.9
ecdsa==0.19.1
exceptiongroup==1.2.2
google-api-core==2.10.2
google-api-python-client==2.166.0
google-auth==1.35.0
google-auth-httplib2==0.2.0
googleapis-common-protos==1.69.2
httplib2==0.22.0
idna==3.10
imagesize==1.4.1
importlib-metadata==6.7.0
iniconfig==2.0.0
isodate==0.7.2
isort==5.11.5
Jinja2==3.1.6
jmespath==1.0.1
kubernetes==32.0.1
lazy-object-proxy==1.9.0
-e git+https://github.com/google/cloud-forensics-utils.git@4a2f66c90b6be28f307c13559a2ff4d541b7c5b3#egg=libcloudforensics
MarkupSafe==2.1.5
mccabe==0.7.0
mock==5.2.0
msal==1.32.0
msal-extensions==1.3.0
msrest==0.7.1
msrestazure==0.6.4.post1
mypy==1.4.1
mypy-extensions==1.0.0
netaddr==1.3.0
nose==1.3.7
nose-cov==1.6
oauthlib==3.2.2
packaging==24.0
platformdirs==4.0.0
pluggy==1.2.0
protobuf==4.24.4
pyasn1==0.5.1
pyasn1-modules==0.3.0
pycparser==2.21
pycryptodome==3.22.0
Pygments==2.17.2
PyJWT==1.7.1
pylint==2.17.7
pyparsing==3.1.4
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.31.0
requests-oauthlib==2.0.0
rsa==4.9
s3transfer==0.8.2
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-autodoc-typehints==1.23.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sshpubkeys==3.3.1
tomli==2.0.1
tomlkit==0.12.5
typed-ast==1.5.5
types-mock==5.1.0.3
types-six==1.16.21.9
typing_extensions==4.7.1
uritemplate==4.1.1
urllib3==1.26.20
websocket-client==1.6.1
wrapt==1.16.0
yapf==0.43.0
zipp==3.15.0
| name: cloud-forensics-utils
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- adal==1.2.7
- alabaster==0.7.13
- astroid==2.15.8
- azure-common==1.1.28
- azure-core==1.30.1
- azure-identity==1.15.0
- azure-mgmt-compute==23.1.0
- azure-mgmt-core==1.4.0
- azure-mgmt-monitor==6.0.2
- azure-mgmt-network==25.2.0
- azure-mgmt-resource==23.0.1
- azure-mgmt-storage==21.1.0
- azure-storage-blob==12.19.1
- babel==2.14.0
- boto3==1.33.13
- botocore==1.33.13
- cachetools==4.2.4
- cffi==1.15.1
- charset-normalizer==3.4.1
- cov-core==1.15.0
- coverage==7.2.7
- cryptography==44.0.2
- dill==0.3.7
- docutils==0.19
- durationpy==0.9
- ecdsa==0.19.1
- exceptiongroup==1.2.2
- google-api-core==2.10.2
- google-api-python-client==2.166.0
- google-auth==1.35.0
- google-auth-httplib2==0.2.0
- googleapis-common-protos==1.69.2
- httplib2==0.22.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- isodate==0.7.2
- isort==5.11.5
- jinja2==3.1.6
- jmespath==1.0.1
- kubernetes==32.0.1
- lazy-object-proxy==1.9.0
- markupsafe==2.1.5
- mccabe==0.7.0
- mock==5.2.0
- msal==1.32.0
- msal-extensions==1.3.0
- msrest==0.7.1
- msrestazure==0.6.4.post1
- mypy==1.4.1
- mypy-extensions==1.0.0
- netaddr==1.3.0
- nose==1.3.7
- nose-cov==1.6
- oauthlib==3.2.2
- packaging==24.0
- platformdirs==4.0.0
- pluggy==1.2.0
- protobuf==4.24.4
- pyasn1==0.5.1
- pyasn1-modules==0.3.0
- pycparser==2.21
- pycryptodome==3.22.0
- pygments==2.17.2
- pyjwt==1.7.1
- pylint==2.17.7
- pyparsing==3.1.4
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.31.0
- requests-oauthlib==2.0.0
- rsa==4.9
- s3transfer==0.8.2
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-autodoc-typehints==1.23.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sshpubkeys==3.3.1
- tomli==2.0.1
- tomlkit==0.12.5
- typed-ast==1.5.5
- types-mock==5.1.0.3
- types-six==1.16.21.9
- typing-extensions==4.7.1
- uritemplate==4.1.1
- urllib3==1.26.20
- websocket-client==1.6.1
- wrapt==1.16.0
- yapf==0.43.0
- zipp==3.15.0
prefix: /opt/conda/envs/cloud-forensics-utils
| [
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testAbandonInstance",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateDiskFromSnapshot",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateImageFromGcsTarGz",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateInstanceFromArguments",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateInstanceFromArgumentsError",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateInstanceFromRequest",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testCreateInstanceFromRequestError",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testFormatLogMessage",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetDisk",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetDiskTypes",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetImageFamily",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetInstance",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetMachineTypes",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetNetwork",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetOrCreateAnalysisVmExist",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testGetOrCreateAnalysisVmNew",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testListDiskByLabels",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testListDisks",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testListInstanceByLabels",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testListInstances",
"tests/providers/gcp/internal/test_compute.py::GoogleCloudComputeTest::testReadStartupScript",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testDelete",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testGetBootDisk",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testGetDisk",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testGetNatIps",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testGetNormalisedFirewalls",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeInstanceTest::testListDisks",
"tests/providers/gcp/internal/test_compute.py::GoogleComputeDiskTest::testSnapshot"
] | [] | [] | [] | Apache License 2.0 | 12,563 | 1,033 | [
"libcloudforensics/providers/gcp/internal/compute.py",
"libcloudforensics/providers/gcp/internal/compute_base_resource.py"
] |
conan-io__conan-10978 | 83976a6980ba1c13c49323473803d6fa406030db | 2022-04-05 15:42:15 | fba42152d18038a848c85b749d3a87c8b7749210 | diff --git a/conan/tools/gnu/autotools.py b/conan/tools/gnu/autotools.py
index 8776e6dd7..af335a46b 100644
--- a/conan/tools/gnu/autotools.py
+++ b/conan/tools/gnu/autotools.py
@@ -4,28 +4,47 @@ from conan.tools.build import build_jobs
from conan.tools.files.files import load_toolchain_args
from conans.client.subsystems import subsystem_path, deduce_subsystem
from conans.client.build import join_arguments
+from conans.tools import args_to_string
+from conan.tools.files import chdir
class Autotools(object):
- def __init__(self, conanfile, namespace=None):
+ def __init__(self, conanfile, namespace=None, build_script_folder=None):
self._conanfile = conanfile
toolchain_file_content = load_toolchain_args(self._conanfile.generators_folder,
namespace=namespace)
self._configure_args = toolchain_file_content.get("configure_args")
self._make_args = toolchain_file_content.get("make_args")
+ self.default_configure_install_args = True
+ self.build_script_folder = os.path.join(self._conanfile.source_folder, build_script_folder) \
+ if build_script_folder else self._conanfile.source_folder
- def configure(self, build_script_folder=None):
+ def configure(self):
"""
http://jingfenghanmax.blogspot.com.es/2010/09/configure-with-host-target-and-build.html
https://gcc.gnu.org/onlinedocs/gccint/Configure-Terms.html
"""
- source = self._conanfile.source_folder
- if build_script_folder:
- source = os.path.join(self._conanfile.source_folder, build_script_folder)
+ configure_args = []
+ if self.default_configure_install_args and self._conanfile.package_folder:
+ def _get_argument(argument_name, cppinfo_name):
+ elements = getattr(self._conanfile.cpp.package, cppinfo_name)
+ return "--{}=${{prefix}}/{}".format(argument_name, elements[0]) if elements else ""
- configure_cmd = "{}/configure".format(source)
+ # If someone want arguments but not the defaults can pass them in args manually
+ configure_args.extend(["--prefix=%s" % self._conanfile.package_folder.replace("\\", "/"),
+ _get_argument("bindir", "bindirs"),
+ _get_argument("sbindir", "bindirs"),
+ _get_argument("libdir", "libdirs"),
+ _get_argument("includedir", "includedirs"),
+ _get_argument("oldincludedir", "includedirs"),
+ _get_argument("datarootdir", "resdirs")])
+
+ self._configure_args = "{} {}".format(self._configure_args, args_to_string(configure_args)) \
+ if configure_args else self._configure_args
+
+ configure_cmd = "{}/configure".format(self.build_script_folder)
subsystem = deduce_subsystem(self._conanfile, scope="build")
configure_cmd = subsystem_path(subsystem, configure_cmd)
cmd = '"{}" {}'.format(configure_cmd, self._configure_args)
@@ -45,8 +64,19 @@ class Autotools(object):
self._conanfile.run(command)
def install(self):
+ # FIXME: we have to run configure twice because the local flow won't work otherwise
+ # because there's no package_folder until the package step
+ self.configure()
self.make(target="install")
+ def autoreconf(self, args=None):
+ command = ["autoreconf"]
+ args = args or ["--force", "--install"]
+ command.extend(args)
+ command = join_arguments(command)
+ with chdir(self, self._conanfile.source_folder):
+ self._conanfile.run(command)
+
def _use_win_mingw(self):
if hasattr(self._conanfile, 'settings_build'):
os_build = self._conanfile.settings_build.get_safe('os')
diff --git a/conan/tools/gnu/autotoolstoolchain.py b/conan/tools/gnu/autotoolstoolchain.py
index 85deead7e..d859bd655 100644
--- a/conan/tools/gnu/autotoolstoolchain.py
+++ b/conan/tools/gnu/autotoolstoolchain.py
@@ -8,6 +8,7 @@ from conan.tools.env import Environment
from conan.tools.files.files import save_toolchain_args
from conan.tools.gnu.get_gnu_triplet import _get_gnu_triplet
from conan.tools.microsoft import VCVars, is_msvc, msvc_runtime_flag
+from conans.errors import ConanException
from conans.tools import args_to_string
@@ -18,7 +19,6 @@ class AutotoolsToolchain:
self.configure_args = []
self.make_args = []
- self.default_configure_install_args = True
# Flags
self.cxxflags = []
@@ -166,26 +166,20 @@ class AutotoolsToolchain:
self.generate_args()
VCVars(self._conanfile).generate(scope=scope)
+ def _shared_static_args(self):
+ args = []
+ if self._conanfile.options.get_safe("shared", False):
+ args.extend(["--enable-shared", "--disable-static"])
+ else:
+ args.extend(["--disable-shared", "--enable-static", "--with-pic"
+ if self._conanfile.options.get_safe("fPIC", True)
+ else "--without-pic"])
+ return args
+
def generate_args(self):
configure_args = []
+ configure_args.extend(self._shared_static_args())
configure_args.extend(self.configure_args)
-
- if self.default_configure_install_args and self._conanfile.package_folder:
- def _get_cpp_info_value(name):
- # Why not taking cpp.build? because this variables are used by the "cmake install"
- # that correspond to the package folder (even if the root is the build directory)
- elements = getattr(self._conanfile.cpp.package, name)
- return elements[0] if elements else None
-
- # If someone want arguments but not the defaults can pass them in args manually
- configure_args.extend(
- ['--prefix=%s' % self._conanfile.package_folder.replace("\\", "/"),
- "--bindir=${prefix}/%s" % _get_cpp_info_value("bindirs"),
- "--sbindir=${prefix}/%s" % _get_cpp_info_value("bindirs"),
- "--libdir=${prefix}/%s" % _get_cpp_info_value("libdirs"),
- "--includedir=${prefix}/%s" % _get_cpp_info_value("includedirs"),
- "--oldincludedir=${prefix}/%s" % _get_cpp_info_value("includedirs"),
- "--datarootdir=${prefix}/%s" % _get_cpp_info_value("resdirs")])
user_args_str = args_to_string(self.configure_args)
for flag, var in (("host", self._host), ("build", self._build), ("target", self._target)):
if var and flag not in user_args_str:
diff --git a/conans/assets/templates/new_v2_autotools.py b/conans/assets/templates/new_v2_autotools.py
new file mode 100644
index 000000000..413488976
--- /dev/null
+++ b/conans/assets/templates/new_v2_autotools.py
@@ -0,0 +1,233 @@
+import textwrap
+
+from conans.assets.templates.new_v2_cmake import source_cpp, source_h, test_main
+
+conanfile_lib = textwrap.dedent("""
+ import os
+
+ from conan import ConanFile
+ from conan.tools.gnu import AutotoolsToolchain, Autotools
+ from conan.tools.layout import basic_layout
+ from conan.tools.files import chdir
+
+
+ class {package_name}Conan(ConanFile):
+ name = "{name}"
+ version = "{version}"
+
+ # Optional metadata
+ license = "<Put the package license here>"
+ author = "<Put your name here> <And your email here>"
+ url = "<Package recipe repository url here, for issues about the package>"
+ description = "<Description of {package_name} here>"
+ topics = ("<Put some tag here>", "<here>", "<and here>")
+
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+ options = {{"shared": [True, False], "fPIC": [True, False]}}
+ default_options = {{"shared": False, "fPIC": True}}
+
+ exports_sources = "configure.ac", "Makefile.am", "src/*"
+
+ def config_options(self):
+ if self.settings.os == "Windows":
+ del self.options.fPIC
+
+ def layout(self):
+ basic_layout(self)
+
+ def generate(self):
+ at_toolchain = AutotoolsToolchain(self)
+ at_toolchain.generate()
+
+ def build(self):
+ autotools = Autotools(self)
+ autotools.autoreconf()
+ autotools.configure()
+ autotools.make()
+
+ def package(self):
+ autotools = Autotools(self)
+ autotools.install()
+
+ def package_info(self):
+ self.cpp_info.libs = ["{name}"]
+ """)
+
+configure_ac = textwrap.dedent("""
+ AC_INIT([{name}], [{version}], [])
+ AM_INIT_AUTOMAKE([-Wall -Werror foreign])
+ AC_PROG_CXX
+ AM_PROG_AR
+ LT_INIT
+ AC_CONFIG_FILES([Makefile src/Makefile])
+ AC_OUTPUT
+ """)
+
+makefile_am = textwrap.dedent("""
+ SUBDIRS = src
+ """)
+
+makefile_am_lib = textwrap.dedent("""
+ lib_LTLIBRARIES = lib{name}.la
+ lib{name}_la_SOURCES = {name}.cpp {name}.h
+ lib{name}_la_HEADERS = {name}.h
+ lib{name}_ladir = $(includedir)
+ """)
+
+test_conanfile = textwrap.dedent("""
+ import os
+
+ from conan import ConanFile
+ from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps
+ from conan.tools.layout import basic_layout
+ from conan.tools.build import cross_building
+ from conan.tools.files import chdir
+
+
+ class {package_name}TestConan(ConanFile):
+ settings = "os", "compiler", "build_type", "arch"
+ # VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
+ # (it will be defined in Conan 2.0)
+ generators = "AutotoolsDeps", "AutotoolsToolchain", "VirtualBuildEnv", "VirtualRunEnv"
+ apply_env = False
+ test_type = "explicit"
+
+ def requirements(self):
+ self.requires(self.tested_reference_str)
+
+ def build(self):
+ autotools = Autotools(self)
+ autotools.autoreconf()
+ autotools.configure()
+ autotools.make()
+
+ def layout(self):
+ basic_layout(self)
+
+ def test(self):
+ if not cross_building(self):
+ cmd = os.path.join(self.cpp.build.bindirs[0], "main")
+ self.run(cmd, env="conanrun")
+ """)
+
+test_configure_ac = textwrap.dedent("""
+ AC_INIT([main], [1.0], [])
+ AM_INIT_AUTOMAKE([-Wall -Werror foreign])
+ AC_PROG_CXX
+ AC_PROG_RANLIB
+ AM_PROG_AR
+ AC_CONFIG_FILES([Makefile])
+ AC_OUTPUT
+ """)
+
+test_makefile_am = textwrap.dedent("""
+ bin_PROGRAMS = main
+ main_SOURCES = main.cpp
+ """)
+
+
+def get_autotools_lib_files(name, version, package_name="Pkg"):
+ files = {"conanfile.py": conanfile_lib.format(name=name, version=version,
+ package_name=package_name),
+ "src/Makefile.am": makefile_am_lib.format(name=name, version=version),
+ "src/{}.cpp".format(name): source_cpp.format(name=name, version=version),
+ "src/{}.h".format(name): source_h.format(name=name, version=version),
+ "configure.ac": configure_ac.format(name=name, version=version),
+ "Makefile.am": makefile_am.format(name=name, version=version),
+ "test_package/conanfile.py": test_conanfile.format(name=name, version=version,
+ package_name=package_name),
+ "test_package/main.cpp": test_main.format(name=name),
+ "test_package/configure.ac": test_configure_ac.format(name=name, version=version),
+ "test_package/Makefile.am": test_makefile_am.format(name=name, version=version)}
+ return files
+
+
+conanfile_exe = textwrap.dedent("""
+ import os
+
+ from conan import ConanFile
+ from conan.tools.gnu import AutotoolsToolchain, Autotools
+ from conan.tools.layout import basic_layout
+ from conan.tools.files import chdir
+
+
+ class {package_name}Conan(ConanFile):
+ name = "{name}"
+ version = "{version}"
+
+ # Optional metadata
+ license = "<Put the package license here>"
+ author = "<Put your name here> <And your email here>"
+ url = "<Package recipe repository url here, for issues about the package>"
+ description = "<Description of {package_name} here>"
+ topics = ("<Put some tag here>", "<here>", "<and here>")
+
+ # Binary configuration
+ settings = "os", "compiler", "build_type", "arch"
+
+ # Sources are located in the same place as this recipe, copy them to the recipe
+ exports_sources = "configure.ac", "Makefile.am", "src/*"
+
+ def layout(self):
+ basic_layout(self)
+
+ def generate(self):
+ at_toolchain = AutotoolsToolchain(self)
+ at_toolchain.generate()
+
+ def build(self):
+ autotools = Autotools(self)
+ autotools.autoreconf()
+ autotools.configure()
+ autotools.make()
+
+ def package(self):
+ autotools = Autotools(self)
+ autotools.install()
+ """)
+
+test_conanfile_exe = textwrap.dedent("""
+ import os
+ from conan import ConanFile
+ from conan.tools.build import cross_building
+ from conan.tools.layout import basic_layout
+
+
+ class {package_name}TestConan(ConanFile):
+ settings = "os", "compiler", "build_type", "arch"
+ # VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
+ # (it will be defined in Conan 2.0)
+ generators = "VirtualRunEnv"
+ apply_env = False
+ test_type = "explicit"
+
+ def requirements(self):
+ self.requires(self.tested_reference_str)
+
+ def layout(self):
+ basic_layout(self)
+
+ def test(self):
+ if not cross_building(self):
+ self.run("{name}", env="conanrun")
+ """)
+
+makefile_am_exe = textwrap.dedent("""
+ bin_PROGRAMS = {name}
+ {name}_SOURCES = main.cpp {name}.cpp {name}.h
+ """)
+
+
+def get_autotools_exe_files(name, version, package_name="Pkg"):
+ files = {"conanfile.py": conanfile_exe.format(name=name, version=version,
+ package_name=package_name),
+ "src/Makefile.am": makefile_am_exe.format(name=name, version=version),
+ "src/main.cpp": test_main.format(name=name),
+ "src/{}.cpp".format(name): source_cpp.format(name=name, version=version),
+ "src/{}.h".format(name): source_h.format(name=name, version=version),
+ "configure.ac": configure_ac.format(name=name, version=version),
+ "Makefile.am": makefile_am.format(name=name, version=version),
+ "test_package/conanfile.py": test_conanfile_exe.format(name=name, version=version,
+ package_name=package_name)}
+ return files
diff --git a/conans/client/cmd/new.py b/conans/client/cmd/new.py
index 70478ba76..67e03f1bc 100644
--- a/conans/client/cmd/new.py
+++ b/conans/client/cmd/new.py
@@ -411,6 +411,12 @@ def cmd_new(ref, header=False, pure_c=False, test=False, exports_sources=False,
elif template == "bazel_exe":
from conans.assets.templates.new_v2_bazel import get_bazel_exe_files
files = get_bazel_exe_files(name, version, package_name)
+ elif template == "autotools_lib":
+ from conans.assets.templates.new_v2_autotools import get_autotools_lib_files
+ files = get_autotools_lib_files(name, version, package_name)
+ elif template == "autotools_exe":
+ from conans.assets.templates.new_v2_autotools import get_autotools_exe_files
+ files = get_autotools_exe_files(name, version, package_name)
else:
if not os.path.isabs(template):
template = os.path.join(cache.cache_folder, "templates", "command/new", template)
| [feature] Create "new --template" for Autotools.
Like https://github.com/conan-io/conan/pull/10760/files but for Autotools.
Add tests using it, including with Windows subsystems.
Also study whether a predefined layout is convenient.
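A minimal sketch of what a recipe generated by the requested template could look like, mirroring the template files added in the patch above (the package name is illustrative and the shared/fPIC option handling is trimmed for brevity):

```python
# Hypothetical output of `conan new hello/0.1 --template=autotools_lib`,
# assuming the Autotools/AutotoolsToolchain helpers behave as in the patch above.
from conan import ConanFile
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.layout import basic_layout


class HelloConan(ConanFile):
    name = "hello"
    version = "0.1"
    settings = "os", "compiler", "build_type", "arch"
    exports_sources = "configure.ac", "Makefile.am", "src/*"

    def layout(self):
        basic_layout(self)

    def generate(self):
        AutotoolsToolchain(self).generate()

    def build(self):
        autotools = Autotools(self)
        autotools.autoreconf()  # runs `autoreconf --force --install` in the source folder
        autotools.configure()   # adds --prefix/--bindir/... derived from cpp.package
        autotools.make()

    def package(self):
        # install() re-runs configure so package_folder is known (see the FIXME in the patch)
        Autotools(self).install()
```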
| conan-io/conan | diff --git a/conans/test/functional/toolchains/gnu/autotools/test_ios.py b/conans/test/functional/toolchains/gnu/autotools/test_ios.py
index 8507072d9..272d9571c 100644
--- a/conans/test/functional/toolchains/gnu/autotools/test_ios.py
+++ b/conans/test/functional/toolchains/gnu/autotools/test_ios.py
@@ -76,4 +76,5 @@ def test_ios():
conanbuild = load_toolchain_args(client.current_folder)
configure_args = conanbuild["configure_args"]
- assert configure_args == "'--host=aarch64-apple-ios' '--build=x86_64-apple-darwin'"
+ assert configure_args == "'--disable-shared' '--enable-static' '--with-pic' " \
+ "'--host=aarch64-apple-ios' '--build=x86_64-apple-darwin'"
diff --git a/conans/test/functional/toolchains/gnu/test_v2_autotools_template.py b/conans/test/functional/toolchains/gnu/test_v2_autotools_template.py
new file mode 100644
index 000000000..b7d46889b
--- /dev/null
+++ b/conans/test/functional/toolchains/gnu/test_v2_autotools_template.py
@@ -0,0 +1,62 @@
+import platform
+import re
+import os
+
+import pytest
+
+from conans.model.ref import ConanFileReference, PackageReference
+from conans.test.utils.tools import TestClient
+
+
[email protected](platform.system() not in ["Linux", "Darwin"], reason="Requires Autotools")
[email protected]_autotools()
+def test_autotools_lib_template():
+ client = TestClient(path_with_spaces=False)
+ client.run("new hello/0.1 --template=autotools_lib")
+
+ # Local flow works
+ client.run("install . -if=install")
+ client.run("build . -if=install")
+
+ client.run("export-pkg . hello/0.1@ -if=install")
+ package_id = re.search(r"Packaging to (\S+)", str(client.out)).group(1)
+ pref = PackageReference(ConanFileReference.loads("hello/0.1"), package_id)
+ package_folder = client.cache.package_layout(pref.ref).package(pref)
+ assert os.path.exists(os.path.join(package_folder, "include", "hello.h"))
+
+ # Create works
+ client.run("create .")
+ assert "hello/0.1: Hello World Release!" in client.out
+
+ client.run("create . -s build_type=Debug")
+ assert "hello/0.1: Hello World Debug!" in client.out
+
+ # Create + shared works
+ client.save({}, clean_first=True)
+ client.run("new hello/0.1 --template=autotools_lib")
+ client.run("create . -o hello:shared=True")
+ assert "hello/0.1: Hello World Release!" in client.out
+ if platform.system() == "Darwin":
+ client.run_command("otool -l test_package/build-release/main")
+ assert "libhello.0.dylib" in client.out
+ else:
+ client.run_command("ldd test_package/build-release/main")
+ assert "libhello.so.0" in client.out
+
+
[email protected](platform.system() not in ["Linux", "Darwin"], reason="Requires Autotools")
[email protected]_autotools()
+def test_autotools_exe_template():
+ client = TestClient(path_with_spaces=False)
+ client.run("new greet/0.1 --template=autotools_exe")
+ # Local flow works
+ client.run("install . -if=install")
+ client.run("build . -if=install")
+
+ # Create works
+ client.run("create .")
+ assert "greet/0.1: Hello World Release!" in client.out
+
+ client.run("create . -s build_type=Debug")
+ assert "greet/0.1: Hello World Debug!" in client.out
+
diff --git a/conans/test/unittests/tools/gnu/autotools_test.py b/conans/test/unittests/tools/gnu/autotools_test.py
index ed601630b..50a312f2b 100644
--- a/conans/test/unittests/tools/gnu/autotools_test.py
+++ b/conans/test/unittests/tools/gnu/autotools_test.py
@@ -17,9 +17,10 @@ def test_source_folder_works():
conanfile.folders.set_base_install(folder)
sources = "/path/to/sources"
conanfile.folders.set_base_source(sources)
- autotools = Autotools(conanfile)
- autotools.configure(build_script_folder="subfolder")
+ autotools = Autotools(conanfile, build_script_folder="subfolder")
+ autotools.configure()
assert conanfile.command.replace("\\", "/") == '"/path/to/sources/subfolder/configure" -foo bar'
+ autotools = Autotools(conanfile)
autotools.configure()
assert conanfile.command.replace("\\", "/") == '"/path/to/sources/configure" -foo bar'
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 3
} | 1.47 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.8",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
beautifulsoup4==4.13.3
bottle==0.12.25
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
-e git+https://github.com/conan-io/conan.git@83976a6980ba1c13c49323473803d6fa406030db#egg=conan
distro==1.6.0
execnet==2.1.1
fasteners==0.19
idna==3.10
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==2.1.5
mock==1.3.0
node-semver==0.6.1
packaging==24.2
parameterized==0.9.0
patch-ng==1.17.4
pbr==6.1.1
pluggy==1.5.0
pluginbase==1.0.1
py==1.11.0
Pygments==2.19.1
PyJWT==1.7.1
pytest==6.2.5
pytest-xdist==3.5.0
python-dateutil==2.9.0.post0
PyYAML==5.4.1
requests==2.32.3
six==1.16.0
soupsieve==2.6
toml==0.10.2
tqdm==4.67.1
typing_extensions==4.13.0
urllib3==1.26.20
waitress==3.0.0
WebOb==1.8.9
WebTest==2.0.35
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=24.2=py38h06a4308_0
- python=3.8.20=he870216_0
- readline=8.2=h5eee18b_0
- setuptools=75.1.0=py38h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.44.0=py38h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- beautifulsoup4==4.13.3
- bottle==0.12.25
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- distro==1.6.0
- execnet==2.1.1
- fasteners==0.19
- idna==3.10
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==2.1.5
- mock==1.3.0
- node-semver==0.6.1
- packaging==24.2
- parameterized==0.9.0
- patch-ng==1.17.4
- pbr==6.1.1
- pluggy==1.5.0
- pluginbase==1.0.1
- py==1.11.0
- pygments==2.19.1
- pyjwt==1.7.1
- pytest==6.2.5
- pytest-xdist==3.5.0
- python-dateutil==2.9.0.post0
- pyyaml==5.4.1
- requests==2.32.3
- six==1.16.0
- soupsieve==2.6
- toml==0.10.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- waitress==3.0.0
- webob==1.8.9
- webtest==2.0.35
prefix: /opt/conda/envs/conan
| [
"conans/test/unittests/tools/gnu/autotools_test.py::test_source_folder_works"
] | [] | [] | [] | MIT License | 12,571 | 4,096 | [
"conan/tools/gnu/autotools.py",
"conan/tools/gnu/autotoolstoolchain.py",
"conans/client/cmd/new.py"
] |
|
burnash__gspread-1021 | 5181e11fea3d71785bd5c7f9b9c8fed50df0bc11 | 2022-04-06 14:46:46 | 3d1092fd26b68fda3b8665c9782abcbe6628f090 | diff --git a/gspread/worksheet.py b/gspread/worksheet.py
index 3058e47..46cebb6 100644
--- a/gspread/worksheet.py
+++ b/gspread/worksheet.py
@@ -376,6 +376,7 @@ class Worksheet:
allow_underscores_in_numeric_literals=False,
numericise_ignore=None,
value_render_option=None,
+ expected_headers=None,
):
"""Returns a list of dictionaries, all of them having the contents of
the spreadsheet with the head row as keys and each of these
@@ -400,7 +401,16 @@ class Worksheet:
be rendered in the the output. See `ValueRenderOption`_ in
the Sheets API.
- .. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
+ .. note::
+
+ ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
+
+ :param list expected_headers: (optional) List of expected headers, they must be unique.
+
+ .. note::
+
+ returned dictionaries will contain all headers even if not included in this list
+
"""
idx = head - 1
@@ -412,9 +422,27 @@ class Worksheet:
keys = data[idx]
- # Check keys are uniques
- if len(keys) != len(set(keys)):
- raise GSpreadException("headers must be uniques")
+ # if no given expected headers, expect all of them
+ if expected_headers is None:
+ expected_headers = keys
+
+ # keys must:
+ # - be uniques
+ # - be part of the complete header list
+ # - not contain extra headers
+ expected = set(expected_headers)
+ headers = set(keys)
+
+ # make sure they are uniques
+ if len(expected) != len(expected_headers):
+ raise GSpreadException("the given 'expected_headers' are not uniques")
+
+ if not expected & headers == expected:
+ raise GSpreadException(
+ "the given 'expected_headers' contains unknown headers: {}".format(
+ expected & headers
+ )
+ )
if numericise_ignore == ["all"]:
values = data[idx + 1 :]
| Duplicate header check in 5.2.0 is not backward compatible
**Describe the bug**
A spreadsheet with multiple columns that had a blank header used to load using `get_all_records` before 5.2.0, but it now fails with "headers must be uniques" exception. I presume, but did not confirm, that it is due to this simplification: https://github.com/burnash/gspread/commit/c8a5a7350c40498cf38d3c4a27c748100632804a
**To Reproduce**
Steps to reproduce the behavior:
1. Run `get_all_records` on a spreadsheet with multiple columns with a blank header.
2. See error "headers must be uniques".
**Expected behavior**
This should work as it used to without an error.
**Environment info:**
- Operating System [e.g. Linux, Windows, macOS]: macOS
- Python version: 3.8
- gspread version: 5.2.0
**Stack trace or other output that would be helpful**
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/edx/other/edx-repo-health/repo_health/check_ownership.py", line 79, in check_ownership
    records = find_worksheet(google_creds_file, spreadsheet_url, worksheet_id)
  File "/edx/other/edx-repo-health/repo_health/check_ownership.py", line 44, in find_worksheet
    return worksheet.get_all_records()
  File "/edx/venvs/edx-repo-health/lib/python3.8/site-packages/gspread/worksheet.py", line 408, in get_all_records
    raise GSpreadException("headers must be uniques")
gspread.exceptions.GSpreadException: headers must be uniques
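A minimal sketch of the workaround this report led to: the `expected_headers` argument added to `get_all_records` in the patch above restricts the uniqueness check to the columns the caller actually needs (the credentials path, URL and column names below are illustrative, not from the original report):

```python
# Hypothetical reproduction of the fix in use.
import gspread

gc = gspread.service_account(filename="service_account.json")
ws = gc.open_by_url("https://docs.google.com/spreadsheets/d/<sheet-id>").sheet1

# gspread 5.2.0 raised GSpreadException("headers must be uniques") whenever any
# two header cells collided -- including several blank ones.  With the patch,
# only the headers listed here must be unique and present; the returned dicts
# still contain every column of the header row.
records = ws.get_all_records(expected_headers=["repo", "owner"])
```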
| burnash/gspread | diff --git a/tests/cassettes/WorksheetTest.test_get_all_records_expected_headers.json b/tests/cassettes/WorksheetTest.test_get_all_records_expected_headers.json
new file mode 100644
index 0000000..71f9f29
--- /dev/null
+++ b/tests/cassettes/WorksheetTest.test_get_all_records_expected_headers.json
@@ -0,0 +1,808 @@
+{
+ "version": 1,
+ "interactions": [
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://www.googleapis.com/drive/v3/files?supportsAllDrives=True",
+ "body": "{\"name\": \"Test WorksheetTest test_get_all_records_expected_headers\", \"mimeType\": \"application/vnd.google-apps.spreadsheet\"}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "123"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "GSE"
+ ],
+ "Cache-Control": [
+ "no-cache, no-store, max-age=0, must-revalidate"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:47 GMT"
+ ],
+ "Expires": [
+ "Mon, 01 Jan 1990 00:00:00 GMT"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "1; mode=block"
+ ],
+ "Pragma": [
+ "no-cache"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin"
+ ],
+ "Content-Security-Policy": [
+ "frame-ancestors 'self'"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "206"
+ ]
+ },
+ "body": {
+ "string": "{\n \"kind\": \"drive#file\",\n \"id\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"name\": \"Test WorksheetTest test_get_all_records_expected_headers\",\n \"mimeType\": \"application/vnd.google-apps.spreadsheet\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:48 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "3354"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_get_all_records_expected_headers\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:48 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "3354"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_get_all_records_expected_headers\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27:clear",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "0"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:48 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "107"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"clearedRange\": \"Sheet1!A1:Z1000\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0:batchUpdate",
+ "body": "{\"requests\": [{\"updateSheetProperties\": {\"properties\": {\"sheetId\": 0, \"gridProperties\": {\"rowCount\": 4, \"columnCount\": 4}}, \"fields\": \"gridProperties/rowCount,gridProperties/columnCount\"}}]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "190"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:48 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "97"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"replies\": [\n {}\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27%21A1%3AD4",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:49 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "58"
+ ]
+ },
+ "body": {
+ "string": "{\n \"range\": \"Sheet1!A1:D4\",\n \"majorDimension\": \"ROWS\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "PUT",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27%21A1%3AD4?valueInputOption=RAW",
+ "body": "{\"values\": [[\"A1\", \"B2\", \"C3\", \"D4\"], [1, \"b2\", 1.45, \"\"], [\"\", \"\", \"\", \"\"], [\"A4\", 0.4, \"\", 4]]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "97"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:49 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "169"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0\",\n \"updatedRange\": \"Sheet1!A1:D4\",\n \"updatedRows\": 4,\n \"updatedColumns\": 4,\n \"updatedCells\": 16\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:49 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "251"
+ ]
+ },
+ "body": {
+ "string": "{\n \"range\": \"Sheet1!A1:D4\",\n \"majorDimension\": \"ROWS\",\n \"values\": [\n [\n \"A1\",\n \"B2\",\n \"C3\",\n \"D4\"\n ],\n [\n \"1\",\n \"b2\",\n \"1.45\"\n ],\n [],\n [\n \"A4\",\n \"0.4\",\n \"\",\n \"4\"\n ]\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:50 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "251"
+ ]
+ },
+ "body": {
+ "string": "{\n \"range\": \"Sheet1!A1:D4\",\n \"majorDimension\": \"ROWS\",\n \"values\": [\n [\n \"A1\",\n \"B2\",\n \"C3\",\n \"D4\"\n ],\n [\n \"1\",\n \"b2\",\n \"1.45\"\n ],\n [],\n [\n \"A4\",\n \"0.4\",\n \"\",\n \"4\"\n ]\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0/values/%27Sheet1%27",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:50 GMT"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "content-length": [
+ "251"
+ ]
+ },
+ "body": {
+ "string": "{\n \"range\": \"Sheet1!A1:D4\",\n \"majorDimension\": \"ROWS\",\n \"values\": [\n [\n \"A1\",\n \"B2\",\n \"C3\",\n \"D4\"\n ],\n [\n \"1\",\n \"b2\",\n \"1.45\"\n ],\n [],\n [\n \"A4\",\n \"0.4\",\n \"\",\n \"4\"\n ]\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "DELETE",
+ "uri": "https://www.googleapis.com/drive/v3/files/10m8LmO83Ew011BF7O1ON6OlbMpIUAu-ur--ezmMugv0?supportsAllDrives=True",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "0"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 204,
+ "message": "No Content"
+ },
+ "headers": {
+ "Content-Length": [
+ "0"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "Cache-Control": [
+ "no-cache, no-store, max-age=0, must-revalidate"
+ ],
+ "Date": [
+ "Sun, 10 Apr 2022 11:32:50 GMT"
+ ],
+ "Expires": [
+ "Mon, 01 Jan 1990 00:00:00 GMT"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Content-Type": [
+ "text/html"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "Pragma": [
+ "no-cache"
+ ],
+ "Vary": [
+ "Origin, X-Origin"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ]
+ },
+ "body": {
+ "string": ""
+ }
+ }
+ }
+ ]
+}
diff --git a/tests/worksheet_test.py b/tests/worksheet_test.py
index cbd00df..e25403f 100644
--- a/tests/worksheet_test.py
+++ b/tests/worksheet_test.py
@@ -444,7 +444,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_get_all_values(self):
self.sheet.resize(4, 4)
- # put in new values, made from three lists
+ # put in new values
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
@@ -470,7 +470,7 @@ class WorksheetTest(GspreadTest):
self.sheet.resize(4, 4)
# renames sheet to contain single and double quotes
self.sheet.update_title("D3")
- # put in new values, made from three lists
+ # put in new values
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
@@ -494,7 +494,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_get_all_records(self):
self.sheet.resize(4, 4)
- # put in new values, made from three lists
+ # put in new values
rows = [
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
@@ -533,7 +533,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_get_all_records_different_header(self):
self.sheet.resize(6, 4)
- # put in new values, made from three lists
+ # put in new values
rows = [
["", "", "", ""],
["", "", "", ""],
@@ -574,7 +574,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_get_all_records_value_render_options(self):
self.sheet.resize(2, 4)
- # put in new values, made from three lists
+ # put in new values
rows = [
["=4/2", "2020-01-01", "string", 53],
["=3/2", 0.12, "1999-01-02", ""],
@@ -614,7 +614,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_get_all_records_duplicate_keys(self):
self.sheet.resize(4, 4)
- # put in new values, made from three lists
+ # put in new values
rows = [
["A1", "A1", "", "D1"],
[1, "b2", 1.45, ""],
@@ -629,6 +629,45 @@ class WorksheetTest(GspreadTest):
with pytest.raises(GSpreadException):
self.sheet.get_all_records()
+ @pytest.mark.vcr(allow_playback_repeats=True)
+ def test_get_all_records_expected_headers(self):
+ self.sheet.resize(4, 4)
+
+ # put in new values
+ rows = [
+ ["A1", "B2", "C3", "D4"],
+ [1, "b2", 1.45, ""],
+ ["", "", "", ""],
+ ["A4", 0.4, "", 4],
+ ]
+ cell_list = self.sheet.range("A1:D4")
+ for cell, value in zip(cell_list, itertools.chain(*rows)):
+ cell.value = value
+ self.sheet.update_cells(cell_list)
+
+ # check non uniques expected headers
+ expected_headers = ["A1", "A1"]
+ with pytest.raises(GSpreadException):
+ self.sheet.get_all_records(expected_headers=expected_headers)
+
+ # check extra headers
+ expected_headers = ["A1", "E5"]
+ with pytest.raises(GSpreadException):
+ self.sheet.get_all_records(expected_headers=expected_headers)
+
+ # check nominal case.
+ expected_headers = ["A1", "C3"]
+ read_records = self.sheet.get_all_records(
+ expected_headers=expected_headers,
+ )
+
+ expected_values_1 = dict(zip(rows[0], rows[1]))
+ expected_values_2 = dict(zip(rows[0], rows[2]))
+ expected_values_3 = dict(zip(rows[0], rows[3]))
+ self.assertDictEqual(expected_values_1, read_records[0])
+ self.assertDictEqual(expected_values_2, read_records[1])
+ self.assertDictEqual(expected_values_3, read_records[2])
+
@pytest.mark.vcr()
def test_get_all_records_numericise_unformatted(self):
self.sheet.resize(2, 4)
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 5.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-vcr",
"vcrpy"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==4.2.4
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
google-auth==1.12.0
google-auth-oauthlib==0.5.3
-e git+https://github.com/burnash/gspread.git@5181e11fea3d71785bd5c7f9b9c8fed50df0bc11#egg=gspread
idna==3.10
iniconfig==2.1.0
multidict==6.2.0
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
propcache==0.3.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pytest==8.3.5
pytest-vcr==1.0.2
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.0
six==1.17.0
tomli==2.2.1
typing_extensions==4.13.0
urllib3==1.26.20
vcrpy==7.0.0
wrapt==1.17.2
yarl==1.18.3
| name: gspread
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==4.2.4
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- google-auth==1.12.0
- google-auth-oauthlib==0.5.3
- idna==3.10
- iniconfig==2.1.0
- multidict==6.2.0
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- propcache==0.3.1
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pytest==8.3.5
- pytest-vcr==1.0.2
- pyyaml==6.0.2
- requests==2.32.3
- requests-oauthlib==2.0.0
- rsa==4.0
- six==1.17.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- vcrpy==7.0.0
- wrapt==1.17.2
- yarl==1.18.3
prefix: /opt/conda/envs/gspread
| [
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_expected_headers"
] | [] | [
"tests/worksheet_test.py::WorksheetTest::test_acell",
"tests/worksheet_test.py::WorksheetTest::test_append_row",
"tests/worksheet_test.py::WorksheetTest::test_append_row_with_empty_value",
"tests/worksheet_test.py::WorksheetTest::test_append_row_with_empty_value_and_table_range",
"tests/worksheet_test.py::WorksheetTest::test_basic_filters",
"tests/worksheet_test.py::WorksheetTest::test_batch_clear",
"tests/worksheet_test.py::WorksheetTest::test_batch_get",
"tests/worksheet_test.py::WorksheetTest::test_batch_update",
"tests/worksheet_test.py::WorksheetTest::test_cell",
"tests/worksheet_test.py::WorksheetTest::test_clear",
"tests/worksheet_test.py::WorksheetTest::test_delete_row",
"tests/worksheet_test.py::WorksheetTest::test_find",
"tests/worksheet_test.py::WorksheetTest::test_findall",
"tests/worksheet_test.py::WorksheetTest::test_format",
"tests/worksheet_test.py::WorksheetTest::test_freeze",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_different_header",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_duplicate_keys",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_numericise_unformatted",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_value_render_options",
"tests/worksheet_test.py::WorksheetTest::test_get_all_values",
"tests/worksheet_test.py::WorksheetTest::test_get_all_values_title_is_a1_notation",
"tests/worksheet_test.py::WorksheetTest::test_group_columns",
"tests/worksheet_test.py::WorksheetTest::test_group_rows",
"tests/worksheet_test.py::WorksheetTest::test_hide_columns_rows",
"tests/worksheet_test.py::WorksheetTest::test_insert_row",
"tests/worksheet_test.py::WorksheetTest::test_range",
"tests/worksheet_test.py::WorksheetTest::test_range_get_all_values",
"tests/worksheet_test.py::WorksheetTest::test_range_reversed",
"tests/worksheet_test.py::WorksheetTest::test_range_unbounded",
"tests/worksheet_test.py::WorksheetTest::test_reorder_worksheets",
"tests/worksheet_test.py::WorksheetTest::test_resize",
"tests/worksheet_test.py::WorksheetTest::test_sort",
"tests/worksheet_test.py::WorksheetTest::test_update_acell",
"tests/worksheet_test.py::WorksheetTest::test_update_and_get",
"tests/worksheet_test.py::WorksheetTest::test_update_cell",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_multiline",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_objects",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_unicode",
"tests/worksheet_test.py::WorksheetTest::test_update_cells",
"tests/worksheet_test.py::WorksheetTest::test_update_cells_noncontiguous",
"tests/worksheet_test.py::WorksheetTest::test_update_cells_unicode",
"tests/worksheet_test.py::WorksheetTest::test_worksheet_notes",
"tests/worksheet_test.py::WorksheetTest::test_worksheet_update_index"
] | [] | MIT License | 12,579 | 527 | [
"gspread/worksheet.py"
] |
|
radiasoft__pykern-158 | 253ff7fa844d592cd544e2961036d27f51f05faa | 2022-04-07 20:43:22 | ae582852dfe38aa625e572c3c4917e29c81d8e7d | diff --git a/pykern/pkunit.py b/pykern/pkunit.py
index c76ac2a..fc72b1b 100644
--- a/pykern/pkunit.py
+++ b/pykern/pkunit.py
@@ -239,12 +239,17 @@ def file_eq(expect_path, *args, **kwargs):
actual_path = b
if not isinstance(actual_path, pykern.pkconst.PY_PATH_LOCAL_TYPE):
actual_path = work_dir().join(actual_path)
- actual = kwargs['actual'] if a else pkio.read_text(actual_path)
+ if a:
+ actual = kwargs['actual']
+ if actual_path.exists():
+ pkfail('actual={} and actual_path={} both exist', actual, actual_path)
+ else:
+ actual = pkio.read_text(actual_path)
if expect_path.ext == '.json' and not actual_path.exists():
- e = pykern.pkjson.load_any(expect_path)
+ e = pkio.read_text(expect_path)
if a:
pkio.mkdir_parent_only(actual_path)
- pykern.pkjson.dump_pretty(actual, filename=actual_path)
+ actual = pykern.pkjson.dump_pretty(actual, filename=actual_path)
else:
if j:
import pykern.pkjinja
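A condensed sketch of the behavior the change above guards, using the fixture names from the accompanying tests (`tests/pkunit_data/*`); treat it as illustrative rather than a complete test module:

```python
# Condensed from the accompanying tests; fixture names come from tests/pkunit_data.
import array

from pykern import pkio, pkunit


def test_file_eq_guard():
    # actual= is serialized into the work dir and compared against the expect file's text
    pkunit.file_eq('file_eq1.json', actual=array.ArrayType('d', [1]))
    # supplying actual= when actual_path already exists now fails loudly
    d = pkunit.empty_work_dir()
    pkio.write_text(d.join('file_eq3.txt'), 'something')
    with pkunit.pkexcept('both exist'):
        pkunit.file_eq('file_eq3.txt', actual='something else')
```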
| pkunit.file_eq should always compare files
Almost always compares objects instead | radiasoft/pykern | diff --git a/tests/pkunit_data/file_eq1.json b/tests/pkunit_data/file_eq1.json
new file mode 100644
index 0000000..7326da5
--- /dev/null
+++ b/tests/pkunit_data/file_eq1.json
@@ -0,0 +1,1 @@
+"array('d', [1.0])"
diff --git a/tests/pkunit_data/file_eq2.txt b/tests/pkunit_data/file_eq2.txt
new file mode 100644
index 0000000..e69de29
diff --git a/tests/pkunit_data/file_eq3.txt b/tests/pkunit_data/file_eq3.txt
new file mode 100644
index 0000000..339f0be
--- /dev/null
+++ b/tests/pkunit_data/file_eq3.txt
@@ -0,0 +1,1 @@
+something else
\ No newline at end of file
diff --git a/tests/pkunit_test.py b/tests/pkunit_test.py
index 31acebb..40b0b6e 100644
--- a/tests/pkunit_test.py
+++ b/tests/pkunit_test.py
@@ -5,6 +5,9 @@ u"""PyTest for :mod:`pykern.pkunit`
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
+import pkgutil
+
+import py
import pytest
def test_assert_object_with_json():
@@ -55,6 +58,22 @@ def test_empty_work_dir():
'Ensure directory was created'
+def test_file_eq():
+ import array
+ import pykern.pkunit
+ import pykern.pkio
+
+ a = array.ArrayType('d', [1])
+ pykern.pkunit.file_eq('file_eq1.json', actual=a)
+
+ with pykern.pkunit.pkexcept(TypeError):
+ pykern.pkunit.file_eq('file_eq2.txt', actual=dict())
+ d = pykern.pkunit.empty_work_dir()
+ pykern.pkio.write_text(d.join('file_eq3.txt'), 'something')
+ with pykern.pkunit.pkexcept('both exist'):
+ pykern.pkunit.file_eq('file_eq3.txt', actual='something else')
+
+
def test_import_module_from_data_dir(monkeypatch):
from pykern import pkunit
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | stable | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argh==0.27.2
Babel==2.14.0
bleach==6.0.0
cachetools==5.5.2
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
cryptography==44.0.2
distlib==0.3.9
docutils==0.19
exceptiongroup==1.2.2
filelock==3.12.2
future==1.0.0
github3.py==4.0.1
idna==3.10
imagesize==1.4.1
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig==2.0.0
jaraco.classes==3.2.3
jeepney==0.9.0
Jinja2==3.1.6
keyring==24.1.1
markdown-it-py==2.2.0
MarkupSafe==2.1.5
mdurl==0.1.2
more-itertools==9.1.0
packaging==24.0
path==16.6.0
path.py==12.5.0
pkginfo==1.10.0
platformdirs==4.0.0
pluggy==1.2.0
psutil==7.0.0
py==1.11.0
py-cpuinfo==9.0.0
pycparser==2.21
Pygments==2.17.2
PyJWT==2.8.0
-e git+https://github.com/radiasoft/pykern.git@253ff7fa844d592cd544e2961036d27f51f05faa#egg=pykern
pyproject-api==1.5.3
pytest==7.4.4
python-dateutil==2.9.0.post0
pytz==2025.2
readme-renderer==37.3
requests==2.31.0
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==13.8.1
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.8
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==2.0.1
tox==4.8.0
twine==4.0.2
typing_extensions==4.7.1
uritemplate==4.1.1
urllib3==2.0.7
virtualenv==20.26.6
webencodings==0.5.1
zipp==3.15.0
| name: pykern
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argh==0.27.2
- babel==2.14.0
- bleach==6.0.0
- cachetools==5.5.2
- cffi==1.15.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.19
- exceptiongroup==1.2.2
- filelock==3.12.2
- future==1.0.0
- github3-py==4.0.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- iniconfig==2.0.0
- jaraco-classes==3.2.3
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==24.1.1
- markdown-it-py==2.2.0
- markupsafe==2.1.5
- mdurl==0.1.2
- more-itertools==9.1.0
- packaging==24.0
- path==16.6.0
- path-py==12.5.0
- pkginfo==1.10.0
- platformdirs==4.0.0
- pluggy==1.2.0
- psutil==7.0.0
- py==1.11.0
- py-cpuinfo==9.0.0
- pycparser==2.21
- pygments==2.17.2
- pyjwt==2.8.0
- pyproject-api==1.5.3
- pytest==7.4.4
- python-dateutil==2.9.0.post0
- pytz==2025.2
- readme-renderer==37.3
- requests==2.31.0
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==13.8.1
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.8
- secretstorage==3.3.3
- setuptools==56.2.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==2.0.1
- tox==4.8.0
- twine==4.0.2
- typing-extensions==4.7.1
- uritemplate==4.1.1
- urllib3==2.0.7
- virtualenv==20.26.6
- webencodings==0.5.1
- zipp==3.15.0
prefix: /opt/conda/envs/pykern
| [
"tests/pkunit_test.py::test_file_eq"
] | [] | [
"tests/pkunit_test.py::test_assert_object_with_json",
"tests/pkunit_test.py::test_data_dir",
"tests/pkunit_test.py::test_data_yaml",
"tests/pkunit_test.py::test_empty_work_dir",
"tests/pkunit_test.py::test_import_module_from_data_dir",
"tests/pkunit_test.py::test_pkexcept",
"tests/pkunit_test.py::test_pkok",
"tests/pkunit_test.py::test_pkre_convert"
] | [] | Apache License 2.0 | 12,590 | 302 | [
"pykern/pkunit.py"
] |
|
borgbackup__borg-6556 | eba6d5cd1c4237b45273b4496ebbbd4e64783179 | 2022-04-10 02:37:28 | ae417cccf6a2653c010a1f379f3fdef8e55d7ccd | diff --git a/src/borg/archiver.py b/src/borg/archiver.py
index 318a8f20..d2a74562 100644
--- a/src/borg/archiver.py
+++ b/src/borg/archiver.py
@@ -46,7 +46,7 @@
from .constants import * # NOQA
from .compress import CompressionSpec
from .crypto.key import key_creator, key_argument_names, tam_required_file, tam_required
- from .crypto.key import RepoKey, KeyfileKey, Blake2RepoKey, Blake2KeyfileKey
+ from .crypto.key import RepoKey, KeyfileKey, Blake2RepoKey, Blake2KeyfileKey, FlexiKey
from .crypto.keymanager import KeyManager
from .helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR, EXIT_SIGNAL_BASE
from .helpers import Error, NoManifestError, set_ec
@@ -622,12 +622,11 @@ def chunkit(chunker_name, *args, **kwargs):
for spec, func in tests:
print(f"{spec:<24} {size:<10} {timeit(func, number=100):.3f}s")
- from borg.helpers.passphrase import Passphrase
print("KDFs (slow is GOOD, use argon2!) ===============================")
count = 5
for spec, func in [
- ("pbkdf2", lambda: Passphrase('mypassphrase').kdf(b'salt'*8, PBKDF2_ITERATIONS, 32)),
- ("argon2", lambda: Passphrase('mypassphrase').argon2(64, b'S' * ARGON2_SALT_BYTES, **ARGON2_ARGS)),
+ ("pbkdf2", lambda: FlexiKey.pbkdf2('mypassphrase', b'salt'*8, PBKDF2_ITERATIONS, 32)),
+ ("argon2", lambda: FlexiKey.argon2('mypassphrase', 64, b'S' * ARGON2_SALT_BYTES, **ARGON2_ARGS)),
]:
print(f"{spec:<24} {count:<10} {timeit(func, number=count):.3f}s")
diff --git a/src/borg/crypto/key.py b/src/borg/crypto/key.py
index d117b658..9fe2ad9a 100644
--- a/src/borg/crypto/key.py
+++ b/src/borg/crypto/key.py
@@ -3,12 +3,15 @@
import os
import textwrap
from binascii import a2b_base64, b2a_base64, hexlify
-from hashlib import sha256
+from hashlib import sha256, pbkdf2_hmac
+from typing import Literal
from ..logger import create_logger
logger = create_logger()
+import argon2.low_level
+
from ..constants import * # NOQA
from ..compress import Compressor
from ..helpers import StableDict
@@ -447,15 +450,53 @@ def decrypt_key_file(self, data, passphrase):
else:
raise UnsupportedKeyFormatError()
+ @staticmethod
+ def pbkdf2(passphrase, salt, iterations, output_len_in_bytes):
+ if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
+ iterations = 1
+ return pbkdf2_hmac('sha256', passphrase.encode('utf-8'), salt, iterations, output_len_in_bytes)
+
+ @staticmethod
+ def argon2(
+ passphrase: str,
+ output_len_in_bytes: int,
+ salt: bytes,
+ time_cost: int,
+ memory_cost: int,
+ parallelism: int,
+ type: Literal['i', 'd', 'id']
+ ) -> bytes:
+ if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
+ time_cost = 1
+ parallelism = 1
+ # 8 is the smallest value that avoids the "Memory cost is too small" exception
+ memory_cost = 8
+ type_map = {
+ 'i': argon2.low_level.Type.I,
+ 'd': argon2.low_level.Type.D,
+ 'id': argon2.low_level.Type.ID,
+ }
+ key = argon2.low_level.hash_secret_raw(
+ secret=passphrase.encode("utf-8"),
+ hash_len=output_len_in_bytes,
+ salt=salt,
+ time_cost=time_cost,
+ memory_cost=memory_cost,
+ parallelism=parallelism,
+ type=type_map[type],
+ )
+ return key
+
def decrypt_key_file_pbkdf2(self, encrypted_key, passphrase):
- key = passphrase.kdf(encrypted_key.salt, encrypted_key.iterations, 32)
+ key = self.pbkdf2(passphrase, encrypted_key.salt, encrypted_key.iterations, 32)
data = AES(key, b'\0'*16).decrypt(encrypted_key.data)
if hmac.compare_digest(hmac_sha256(key, data), encrypted_key.hash):
return data
return None
def decrypt_key_file_argon2(self, encrypted_key, passphrase):
- key = passphrase.argon2(
+ key = self.argon2(
+ passphrase,
output_len_in_bytes=64,
salt=encrypted_key.salt,
time_cost=encrypted_key.argon2_time_cost,
@@ -485,7 +526,7 @@ def encrypt_key_file(self, data, passphrase, algorithm):
def encrypt_key_file_pbkdf2(self, data, passphrase):
salt = os.urandom(32)
iterations = PBKDF2_ITERATIONS
- key = passphrase.kdf(salt, iterations, 32)
+ key = self.pbkdf2(passphrase, salt, iterations, 32)
hash = hmac_sha256(key, data)
cdata = AES(key, b'\0'*16).encrypt(data)
enc_key = EncryptedKey(
@@ -500,7 +541,8 @@ def encrypt_key_file_pbkdf2(self, data, passphrase):
def encrypt_key_file_argon2(self, data, passphrase):
salt = os.urandom(ARGON2_SALT_BYTES)
- key = passphrase.argon2(
+ key = self.argon2(
+ passphrase,
output_len_in_bytes=64,
salt=salt,
**ARGON2_ARGS,
diff --git a/src/borg/helpers/passphrase.py b/src/borg/helpers/passphrase.py
index 52ecc11b..b53e26ad 100644
--- a/src/borg/helpers/passphrase.py
+++ b/src/borg/helpers/passphrase.py
@@ -3,8 +3,6 @@
import shlex
import subprocess
import sys
-from hashlib import pbkdf2_hmac
-from typing import Literal
from . import bin_to_hex
from . import Error
@@ -13,8 +11,6 @@
from ..logger import create_logger
-import argon2.low_level
-
logger = create_logger()
@@ -139,38 +135,3 @@ def new(cls, allow_empty=False):
def __repr__(self):
return '<Passphrase "***hidden***">'
-
- def kdf(self, salt, iterations, length):
- if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
- iterations = 1
- return pbkdf2_hmac('sha256', self.encode('utf-8'), salt, iterations, length)
-
- def argon2(
- self,
- output_len_in_bytes: int,
- salt: bytes,
- time_cost,
- memory_cost,
- parallelism,
- type: Literal['i', 'd', 'id']
- ) -> bytes:
- if os.environ.get("BORG_TESTONLY_WEAKEN_KDF") == "1":
- time_cost = 1
- parallelism = 1
- # 8 is the smallest value that avoids the "Memory cost is too small" exception
- memory_cost = 8
- type_map = {
- 'i': argon2.low_level.Type.I,
- 'd': argon2.low_level.Type.D,
- 'id': argon2.low_level.Type.ID,
- }
- key = argon2.low_level.hash_secret_raw(
- secret=self.encode("utf-8"),
- hash_len=output_len_in_bytes,
- salt=salt,
- time_cost=time_cost,
- memory_cost=memory_cost,
- parallelism=parallelism,
- type=type_map[type],
- )
- return key
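The methods removed here now live as static methods on `FlexiKey` (see the key.py hunk above); a minimal sketch of the new entry points — the salt, iteration count, and argon2 parameters below are illustrative values, not the repository defaults:

```python
# Illustrative only: exercises the relocated KDF static methods.
from borg.crypto.key import FlexiKey

# pbkdf2(passphrase, salt, iterations, output_len_in_bytes)
key32 = FlexiKey.pbkdf2('mypassphrase', b'salt' * 8, 100, 32)

# argon2 mirrors the fields stored alongside the encrypted key
key64 = FlexiKey.argon2(
    'mypassphrase',
    output_len_in_bytes=64,
    salt=b'S' * 16,
    time_cost=1,
    memory_cost=8,
    parallelism=1,
    type='id',
)
assert len(key32) == 32 and len(key64) == 64
```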
| move kdf code
The `borg.helpers.passphrase` module has a `Passphrase.kdf` method.
- that's crypto code, so it should rather live below the `borg.crypto` package
- all the other code there is just rather simple input / output / env var processing
@hexagonrecursion ^^^
(master branch only) | borgbackup/borg | diff --git a/src/borg/testsuite/crypto.py b/src/borg/testsuite/crypto.py
index 3465bb8f..fd0a57e7 100644
--- a/src/borg/testsuite/crypto.py
+++ b/src/borg/testsuite/crypto.py
@@ -10,8 +10,7 @@
from ..crypto.low_level import bytes_to_long, bytes_to_int, long_to_bytes
from ..crypto.low_level import hkdf_hmac_sha512
from ..crypto.low_level import AES, hmac_sha256
-from ..crypto.key import KeyfileKey, UnsupportedKeyFormatError, RepoKey
-from ..helpers.passphrase import Passphrase
+from ..crypto.key import KeyfileKey, UnsupportedKeyFormatError, RepoKey, FlexiKey
from ..helpers import msgpack
from ..constants import KEY_ALGORITHMS
@@ -260,7 +259,7 @@ def test_hkdf_hmac_sha512_5(self):
assert okm == bytes.fromhex('1407d46013d98bc6decefcfee55f0f90b0c7f63d68eb1a80eaf07e953cfc0a3a5240a155d6e4daa965bb')
-def test_decrypt_key_file_argon2_aes256_ctr_hmac_sha256(monkeypatch):
+def test_decrypt_key_file_argon2_aes256_ctr_hmac_sha256():
plain = b'hello'
# echo -n "hello, pass phrase" | argon2 saltsaltsaltsalt -id -t 1 -k 8 -p 1 -l 64 -r
key = bytes.fromhex('d07cc7f9cfb483303e0b9fec176b2a9c559bb70c3a9fb0d5f9c0c23527cd09570212449f09f8cd28c1a41b73fa0098e889c3f2642e87c392e51f95d70d248d9d')
@@ -282,21 +281,18 @@ def test_decrypt_key_file_argon2_aes256_ctr_hmac_sha256(monkeypatch):
'algorithm': 'argon2 aes256-ctr hmac-sha256',
'data': envelope,
})
- monkeypatch.setenv('BORG_PASSPHRASE', "hello, pass phrase")
- passphrase = Passphrase.new()
key = KeyfileKey(None)
- decrypted = key.decrypt_key_file(encrypted, passphrase)
+ decrypted = key.decrypt_key_file(encrypted, "hello, pass phrase")
assert decrypted == plain
-def test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256(monkeypatch):
+def test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256():
plain = b'hello'
salt = b'salt'*4
- monkeypatch.setenv('BORG_PASSPHRASE', "hello, pass phrase")
- passphrase = Passphrase.new()
- key = passphrase.kdf(salt, iterations=1, length=32)
+ passphrase = "hello, pass phrase"
+ key = FlexiKey.pbkdf2(passphrase, salt, 1, 32)
hash = hmac_sha256(key, plain)
data = AES(key, b'\0'*16).encrypt(plain)
encrypted = msgpack.packb({
@@ -314,10 +310,8 @@ def test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256(monkeypatch):
assert decrypted == plain
-def test_decrypt_key_file_unsupported_algorithm(monkeypatch):
+def test_decrypt_key_file_unsupported_algorithm():
"""We will add more algorithms in the future. We should raise a helpful error."""
- monkeypatch.setenv('BORG_PASSPHRASE', "hello, pass phrase")
- passphrase = Passphrase.new()
key = KeyfileKey(None)
encrypted = msgpack.packb({
'algorithm': 'THIS ALGORITHM IS NOT SUPPORTED',
@@ -325,20 +319,18 @@ def test_decrypt_key_file_unsupported_algorithm(monkeypatch):
})
with pytest.raises(UnsupportedKeyFormatError):
- key.decrypt_key_file(encrypted, passphrase)
+ key.decrypt_key_file(encrypted, "hello, pass phrase")
-def test_decrypt_key_file_v2_is_unsupported(monkeypatch):
+def test_decrypt_key_file_v2_is_unsupported():
"""There may eventually be a version 2 of the format. For now we should raise a helpful error."""
- monkeypatch.setenv('BORG_PASSPHRASE', "hello, pass phrase")
- passphrase = Passphrase.new()
key = KeyfileKey(None)
encrypted = msgpack.packb({
'version': 2,
})
with pytest.raises(UnsupportedKeyFormatError):
- key.decrypt_key_file(encrypted, passphrase)
+ key.decrypt_key_file(encrypted, "hello, pass phrase")
@pytest.mark.parametrize('cli_argument, expected_algorithm', KEY_ALGORITHMS.items())
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libacl1-dev libssl-dev liblz4-dev libzstd-dev libxxhash-dev libdeflate-dev build-essential pkg-config python3-pkgconfig"
],
"python": "3.9",
"reqs_path": [
"requirements.d/development.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | argon2-cffi==23.1.0
argon2-cffi-bindings==21.2.0
backports.tarfile==1.2.0
-e git+https://github.com/borgbackup/borg.git@eba6d5cd1c4237b45273b4496ebbbd4e64783179#egg=borgbackup
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
coverage==7.8.0
cryptography==44.0.2
Cython==3.0.12
distlib==0.3.9
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
keyring==25.6.0
markdown-it-py==3.0.0
mdurl==0.1.2
more-itertools==10.6.0
msgpack==1.0.3
nh3==0.2.21
packaging==24.2
pkgconfig==1.5.5
platformdirs==4.3.7
pluggy==1.5.0
py-cpuinfo==9.0.0
pycparser==2.22
Pygments==2.19.1
pyproject-api==1.9.0
pytest==8.3.5
pytest-benchmark==5.1.0
pytest-cov==6.0.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
setuptools-scm==8.2.0
six==1.17.0
tomli==2.2.1
tox==4.25.0
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: borg
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argon2-cffi==23.1.0
- argon2-cffi-bindings==21.2.0
- backports-tarfile==1.2.0
- borgbackup==1.3.0.dev36+geba6d5cd
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- coverage==7.8.0
- cryptography==44.0.2
- cython==3.0.12
- distlib==0.3.9
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- keyring==25.6.0
- markdown-it-py==3.0.0
- mdurl==0.1.2
- more-itertools==10.6.0
- msgpack==1.0.3
- nh3==0.2.21
- packaging==24.2
- pkgconfig==1.5.5
- platformdirs==4.3.7
- pluggy==1.5.0
- py-cpuinfo==9.0.0
- pycparser==2.22
- pygments==2.19.1
- pyproject-api==1.9.0
- pytest==8.3.5
- pytest-benchmark==5.1.0
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- setuptools-scm==8.2.0
- six==1.17.0
- tomli==2.2.1
- tox==4.25.0
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/borg
| [
"src/borg/testsuite/crypto.py::test_decrypt_key_file_argon2_aes256_ctr_hmac_sha256",
"src/borg/testsuite/crypto.py::test_decrypt_key_file_pbkdf2_sha256_aes256_ctr_hmac_sha256"
] | [] | [
"src/borg/testsuite/crypto.py::CryptoTestCase::test_AE",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_AEAD",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_AEAD_with_more_AAD",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_AES256_CTR_HMAC_SHA256",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_AES256_CTR_HMAC_SHA256_aad",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_UNENCRYPTED",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_bytes_to_int",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_bytes_to_long",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_hkdf_hmac_sha512",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_hkdf_hmac_sha512_2",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_hkdf_hmac_sha512_3",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_hkdf_hmac_sha512_4",
"src/borg/testsuite/crypto.py::CryptoTestCase::test_hkdf_hmac_sha512_5",
"src/borg/testsuite/crypto.py::test_decrypt_key_file_unsupported_algorithm",
"src/borg/testsuite/crypto.py::test_decrypt_key_file_v2_is_unsupported",
"src/borg/testsuite/crypto.py::test_key_file_roundtrip[pbkdf2-sha256]",
"src/borg/testsuite/crypto.py::test_key_file_roundtrip[argon2-argon2",
"src/borg/testsuite/crypto.py::test_repo_key_detect_does_not_raise_integrity_error"
] | [] | BSD License | 12,597 | 1,990 | [
"src/borg/archiver.py",
"src/borg/crypto/key.py",
"src/borg/helpers/passphrase.py"
] |
|
Unidata__MetPy-2422 | 0521fbd97654aa88dbce2a85fa1cb0abbb20c540 | 2022-04-10 02:51:19 | 394fc195e3b72fd88d7cf4c1afe862588004e433 | diff --git a/src/metpy/xarray.py b/src/metpy/xarray.py
index 9b0b4e0e8f..d9d0cb7edf 100644
--- a/src/metpy/xarray.py
+++ b/src/metpy/xarray.py
@@ -174,6 +174,22 @@ class MetPyDataArrayAccessor:
"""
return self.quantify().copy(data=self.unit_array.to(units))
+ def convert_to_base_units(self):
+ """Return new DataArray with values converted to base units.
+
+ See Also
+ --------
+ convert_units
+
+ Notes
+ -----
+ Any cached/lazy-loaded data (except that in a Dask array) will be loaded into memory
+ by this operation. Do not utilize on moderate- to large-sized remote datasets before
+ subsetting!
+
+ """
+ return self.quantify().copy(data=self.unit_array.to_base_units())
+
def convert_coordinate_units(self, coord, units):
"""Return new DataArray with specified coordinate converted to different units.
diff --git a/tutorials/xarray_tutorial.py b/tutorials/xarray_tutorial.py
index 784f367de5..8e676ccf20 100644
--- a/tutorials/xarray_tutorial.py
+++ b/tutorials/xarray_tutorial.py
@@ -227,6 +227,12 @@ heights_at_45_north
temperature_degc = temperature[0].metpy.convert_units('degC')
temperature_degc
+#########################################################################
+# To base unit conversion:
+
+temperature_degk = temperature_degc.metpy.convert_to_base_units()
+temperature_degk
+
#########################################################################
# Unit conversion for coordinates:
heights_on_hpa_levels = heights.metpy.convert_coordinate_units('isobaric3', 'hPa')
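A self-contained usage sketch of the accessor method this change adds; the sample data below is invented for illustration:

```python
# Minimal sketch: build a quantified DataArray and convert it to base units.
import numpy as np
import xarray as xr
from metpy.units import units

temperature_degc = xr.DataArray(
    units.Quantity(np.array([25.0, 30.0]), 'degC'),
    dims=('point',),
    name='temperature',
)

# Same conversion the tutorial hunk above shows: degC -> kelvin (the pint base unit).
temperature_degk = temperature_degc.metpy.convert_to_base_units()
print(temperature_degk.metpy.units)  # kelvin
```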
| add xarray accessor interface for `to_base_units`
### What should we add?
Per @kgoebber at today's dev call, a shorthand for pint's [`to_base_units`](https://pint.readthedocs.io/en/latest/developers_reference.html#pint.Quantity.to_base_units) would be a valuable addition to the xarray accessor | Unidata/MetPy | diff --git a/tests/test_xarray.py b/tests/test_xarray.py
index 09d44ec022..0805501739 100644
--- a/tests/test_xarray.py
+++ b/tests/test_xarray.py
@@ -153,6 +153,19 @@ def test_convert_units(test_var):
assert_almost_equal(result[0, 0, 0, 0], 18.44 * units.degC, 2)
+def test_convert_to_base_units(test_ds):
+ """Test conversion of units."""
+ uwnd = test_ds.u_wind.metpy.quantify()
+ result = (uwnd * (500 * units.hPa)).metpy.convert_to_base_units()
+
+ # Check that units are updated without modifying original
+ assert result.metpy.units == units('kg s**-3')
+ assert test_ds.u_wind.metpy.units == units('m/s')
+
+ # Make sure we now get an array back with properly converted values
+ assert_almost_equal(result[0, 0, 0, 0], -448416.12 * units('kg s**-3'), 2)
+
+
def test_convert_coordinate_units(test_ds_generic):
"""Test conversion of coordinate units."""
result = test_ds_generic['test'].metpy.convert_coordinate_units('b', 'percent')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 2
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"ci/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
execnet==2.1.1
fonttools==4.56.0
idna==3.10
importlib-resources==5.6.0
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.5.1
-e git+https://github.com/Unidata/MetPy.git@0521fbd97654aa88dbce2a85fa1cb0abbb20c540#egg=MetPy
numpy==1.22.3
packaging==24.2
pandas==1.4.2
pillow==11.1.0
Pint==0.19.1
pluggy==1.5.0
pooch==1.6.0
pyparsing==3.2.3
pyproj==3.3.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
scipy==1.8.0
six==1.17.0
tomli==2.2.1
traitlets==5.1.1
typing_extensions==4.13.0
urllib3==2.3.0
xarray==2022.3.0
zipp==3.21.0
| name: MetPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- execnet==2.1.1
- fonttools==4.56.0
- idna==3.10
- importlib-resources==5.6.0
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.5.1
- metpy==1.3.0.post3+g0521fbd976
- numpy==1.22.3
- packaging==24.2
- pandas==1.4.2
- pillow==11.1.0
- pint==0.19.1
- pluggy==1.5.0
- pooch==1.6.0
- pyparsing==3.2.3
- pyproj==3.3.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- scipy==1.8.0
- six==1.17.0
- tomli==2.2.1
- traitlets==5.1.1
- typing-extensions==4.13.0
- urllib3==2.3.0
- xarray==2022.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/MetPy
| [
"tests/test_xarray.py::test_convert_to_base_units"
] | [
"tests/test_xarray.py::test_units_percent",
"tests/test_xarray.py::test_time_deltas"
] | [
"tests/test_xarray.py::test_pyproj_projection",
"tests/test_xarray.py::test_no_projection",
"tests/test_xarray.py::test_unit_array",
"tests/test_xarray.py::test_units",
"tests/test_xarray.py::test_units_data",
"tests/test_xarray.py::test_magnitude_with_quantity",
"tests/test_xarray.py::test_magnitude_without_quantity",
"tests/test_xarray.py::test_convert_units",
"tests/test_xarray.py::test_convert_coordinate_units",
"tests/test_xarray.py::test_quantify",
"tests/test_xarray.py::test_dequantify",
"tests/test_xarray.py::test_dataset_quantify",
"tests/test_xarray.py::test_dataset_dequantify",
"tests/test_xarray.py::test_radian_projection_coords",
"tests/test_xarray.py::test_missing_grid_mapping_valid",
"tests/test_xarray.py::test_missing_grid_mapping_invalid",
"tests/test_xarray.py::test_missing_grid_mapping_var",
"tests/test_xarray.py::test_parsecf_crs",
"tests/test_xarray.py::test_parsecf_existing_scalar_crs",
"tests/test_xarray.py::test_parsecf_existing_vector_crs",
"tests/test_xarray.py::test_preprocess_and_wrap_only_preprocessing",
"tests/test_xarray.py::test_coordinates_basic_by_method",
"tests/test_xarray.py::test_coordinates_basic_by_property",
"tests/test_xarray.py::test_coordinates_specified_by_name_with_dataset",
"tests/test_xarray.py::test_coordinates_specified_by_dataarray_with_dataset",
"tests/test_xarray.py::test_missing_coordinate_type",
"tests/test_xarray.py::test_assign_coordinates_not_overwrite",
"tests/test_xarray.py::test_resolve_axis_conflict_lonlat_and_xy",
"tests/test_xarray.py::test_resolve_axis_conflict_double_lonlat",
"tests/test_xarray.py::test_resolve_axis_conflict_double_xy",
"tests/test_xarray.py::test_resolve_axis_conflict_double_x_with_single_dim",
"tests/test_xarray.py::test_resolve_axis_conflict_double_vertical",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple0]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple1]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple2]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple3]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple4]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple5]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple6]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple7]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple8]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple9]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple10]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple11]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple12]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple13]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple14]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple15]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple16]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple17]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple18]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple19]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple20]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple21]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple22]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple23]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple24]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple25]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple26]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple27]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple28]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple29]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple30]",
"tests/test_xarray.py::test_check_axis_criterion_match[test_tuple31]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple0]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple1]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple2]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple3]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple4]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple5]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple6]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple7]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple8]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple9]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple10]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple11]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple12]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple13]",
"tests/test_xarray.py::test_check_axis_unit_match[test_tuple14]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple0]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple1]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple2]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple3]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple4]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple5]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple6]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple7]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple8]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple9]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple10]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple11]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple12]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple13]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple14]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple15]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple16]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple17]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple18]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple19]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple20]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple21]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple22]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple23]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple24]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple25]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple26]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple27]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple28]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple29]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple30]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple31]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple32]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple33]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple34]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple35]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple36]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple37]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple38]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple39]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple40]",
"tests/test_xarray.py::test_check_axis_regular_expression_match[test_tuple41]",
"tests/test_xarray.py::test_narr_example_variable_without_grid_mapping",
"tests/test_xarray.py::test_coordinates_identical_true",
"tests/test_xarray.py::test_coordinates_identical_false_number_of_coords",
"tests/test_xarray.py::test_coordinates_identical_false_coords_mismatch",
"tests/test_xarray.py::test_check_matching_coordinates",
"tests/test_xarray.py::test_find_axis_name_integer",
"tests/test_xarray.py::test_find_axis_name_axis_type",
"tests/test_xarray.py::test_find_axis_name_dim_coord_name",
"tests/test_xarray.py::test_find_axis_name_bad_identifier",
"tests/test_xarray.py::test_find_axis_number_integer",
"tests/test_xarray.py::test_find_axis_number_axis_type",
"tests/test_xarray.py::test_find_axis_number_dim_coord_number",
"tests/test_xarray.py::test_find_axis_number_bad_identifier",
"tests/test_xarray.py::test_cf_parse_with_grid_mapping",
"tests/test_xarray.py::test_data_array_loc_get_with_units",
"tests/test_xarray.py::test_data_array_loc_set_with_units",
"tests/test_xarray.py::test_data_array_loc_with_ellipsis",
"tests/test_xarray.py::test_data_array_loc_non_tuple",
"tests/test_xarray.py::test_data_array_loc_too_many_indices",
"tests/test_xarray.py::test_data_array_sel_dict_with_units",
"tests/test_xarray.py::test_data_array_sel_kwargs_with_units",
"tests/test_xarray.py::test_dataset_loc_with_units",
"tests/test_xarray.py::test_dataset_sel_kwargs_with_units",
"tests/test_xarray.py::test_dataset_sel_non_dict_pos_arg",
"tests/test_xarray.py::test_dataset_sel_mixed_dict_and_kwarg",
"tests/test_xarray.py::test_dataset_loc_without_dict",
"tests/test_xarray.py::test_dataset_parse_cf_keep_attrs",
"tests/test_xarray.py::test_check_axis_with_bad_unit",
"tests/test_xarray.py::test_dataset_parse_cf_varname_list",
"tests/test_xarray.py::test_coordinate_identification_shared_but_not_equal_coords",
"tests/test_xarray.py::test_one_dimensional_lat_lon",
"tests/test_xarray.py::test_auxilary_lat_lon_with_xy",
"tests/test_xarray.py::test_auxilary_lat_lon_without_xy",
"tests/test_xarray.py::test_auxilary_lat_lon_without_xy_as_xy",
"tests/test_xarray.py::test_assign_crs_error_with_both_attrs",
"tests/test_xarray.py::test_assign_crs_error_with_neither_attrs",
"tests/test_xarray.py::test_assign_latitude_longitude_no_horizontal",
"tests/test_xarray.py::test_assign_y_x_no_horizontal",
"tests/test_xarray.py::test_assign_latitude_longitude_basic_dataarray",
"tests/test_xarray.py::test_assign_latitude_longitude_error_existing_dataarray",
"tests/test_xarray.py::test_assign_latitude_longitude_force_existing_dataarray",
"tests/test_xarray.py::test_assign_latitude_longitude_basic_dataset",
"tests/test_xarray.py::test_assign_y_x_basic_dataarray",
"tests/test_xarray.py::test_assign_y_x_error_existing_dataarray",
"tests/test_xarray.py::test_assign_y_x_force_existing_dataarray",
"tests/test_xarray.py::test_assign_y_x_dataarray_outside_tolerance",
"tests/test_xarray.py::test_assign_y_x_dataarray_transposed",
"tests/test_xarray.py::test_assign_y_x_dataset_assumed_order",
"tests/test_xarray.py::test_assign_y_x_error_existing_dataset",
"tests/test_xarray.py::test_update_attribute_dictionary",
"tests/test_xarray.py::test_update_attribute_callable",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test0-other0-False-expected0]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test1-other1-True-expected1]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test2-other2-False-expected2]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test3-other3-True-expected3]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test4-other4-False-expected4]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test5-other5-True-expected5]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test6-other6-False-expected6]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test7-other7-True-expected7]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test8-other8-False-expected8]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test9-other9-True-expected9]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test10-other10-False-expected10]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test11-other11-False-expected11]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test12-other12-True-expected12]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test13-other13-False-expected13]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg[test14-other14-True-expected14]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg_raising_dimensionality_error[test0-other0]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg_raising_dimensionality_error[test1-other1]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg_raising_dimensionality_error[test2-other2]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg_raising_dimensionality_error[test3-other3]",
"tests/test_xarray.py::test_wrap_with_wrap_like_kwarg_raising_dimensionality_error[test4-other4]",
"tests/test_xarray.py::test_wrap_with_argument_kwarg",
"tests/test_xarray.py::test_preprocess_and_wrap_with_broadcasting",
"tests/test_xarray.py::test_preprocess_and_wrap_broadcasting_error",
"tests/test_xarray.py::test_preprocess_and_wrap_with_to_magnitude",
"tests/test_xarray.py::test_preprocess_and_wrap_with_variable",
"tests/test_xarray.py::test_grid_deltas_from_dataarray_lonlat",
"tests/test_xarray.py::test_grid_deltas_from_dataarray_xy",
"tests/test_xarray.py::test_grid_deltas_from_dataarray_nominal_lonlat",
"tests/test_xarray.py::test_grid_deltas_from_dataarray_lonlat_assumed_order",
"tests/test_xarray.py::test_grid_deltas_from_dataarray_invalid_kind",
"tests/test_xarray.py::test_add_grid_arguments_from_dataarray",
"tests/test_xarray.py::test_add_vertical_dim_from_xarray"
] | [] | BSD 3-Clause "New" or "Revised" License | 12,599 | 416 | [
"src/metpy/xarray.py",
"tutorials/xarray_tutorial.py"
] |
|
bids-standard__pybids-836 | 37dc6eadfdcfbbf61ad930710fb703a6b48cd49c | 2022-04-11 21:17:24 | df945f499ab19ea5ee95b2cb3e0ec401984231da | diff --git a/bids/modeling/transformations/base.py b/bids/modeling/transformations/base.py
index 3c87f69f..85d5d1d2 100644
--- a/bids/modeling/transformations/base.py
+++ b/bids/modeling/transformations/base.py
@@ -91,6 +91,11 @@ class Transformation(metaclass=ABCMeta):
# be passed through as-is even if categorical.
_allow_categorical = None
+ # Boolean indicating whether to treat each key word argument as a one-to-one
+ # mapping with each variable or to treat the key word argument as applying to
+ # every input variable.
+ _sync_kwargs = True
+
def __new__(cls, collection, variables, *args, **kwargs):
t = super(Transformation, cls).__new__(cls)
t._setup(collection, variables, *args, **kwargs)
@@ -117,7 +122,11 @@ class Transformation(metaclass=ABCMeta):
# 'variables'
kwargs[arg_spec.args[2 + i]] = arg_val
- self.kwargs = kwargs
+ # listify kwargs if synced
+ if self._sync_kwargs:
+ self.kwargs = {k: listify(v) for k, v in kwargs.items()}
+ else:
+ self.kwargs = kwargs
# Expand any detected variable group names or wild cards
self._expand_variable_groups()
@@ -255,20 +264,22 @@ class Transformation(metaclass=ABCMeta):
if not self._loopable:
variables = [variables]
+ i_kwargs = kwargs
for i, col in enumerate(variables):
-
+ if self._sync_kwargs:
+ i_kwargs = {k: v[i] for k, v in kwargs.items()}
# If we still have a list, pass all variables in one block
if isinstance(col, (list, tuple)):
- result = self._transform(data, **kwargs)
+ result = self._transform(data, **i_kwargs)
if self._return_type not in ['none', None]:
col = col[0].clone(data=result, name=self.output[0])
# Otherwise loop over variables individually
else:
if self._groupable and self.groupby is not None:
result = col.apply(self._transform, groupby=self.groupby,
- **kwargs)
+ **i_kwargs)
else:
- result = self._transform(data[i], **kwargs)
+ result = self._transform(data[i], **i_kwargs)
if self._return_type in ['none', None]:
continue
diff --git a/bids/modeling/transformations/compute.py b/bids/modeling/transformations/compute.py
index 71877c06..a2b1c0dd 100644
--- a/bids/modeling/transformations/compute.py
+++ b/bids/modeling/transformations/compute.py
@@ -192,6 +192,7 @@ class Sum(Transformation):
_groupable = False
_aligned_required = True
_output_required = True
+ _sync_kwargs = False
def _transform(self, data, weights=None):
data = pd.concat(data, axis=1, sort=True)
diff --git a/bids/modeling/transformations/munge.py b/bids/modeling/transformations/munge.py
index ea0e1e28..5cc312c8 100644
--- a/bids/modeling/transformations/munge.py
+++ b/bids/modeling/transformations/munge.py
@@ -299,6 +299,7 @@ class Split(Transformation):
_return_type = 'variable'
_allow_categorical = ('by',)
_densify = ('variables', 'by')
+ _sync_kwargs = False
def _transform(self, var, by):
| handling kwargs in loopable Transformations?
AFAICT the following is a valid Transformation:
```
{
"Name": "Assign",
"Input": ["response_time", "response_time", "response_time"],
"Target": ["pumps", "control_pumps", "cash"],
"Output": ["pumps_rt", "control_pumps_rt", "cash_rt"],
"InputAttr": ["value", "value", "value"],
"TargetAttr": ["duration", "duration", "duration"]
}
```
according to [the transformer specification](https://docs.google.com/document/d/1uxN6vPWbC7ciAx2XWtT5Y-lBrdckZKpPdNUNpwRxHoU/edit#), as long as "Target", "Output", "InputAttr", and "TargetAttr" are the same length, the transformer should perform a one-to-one mapping of variables. Instead I get the following traceback:
```
Traceback (most recent call last):
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/nipype/interfaces/base/core.py", line 398, in run
runtime = self._run_interface(runtime)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/fitlins/interfaces/bids.py", line 250, in _run_interface
self._results['all_specs'] = self._load_graph(runtime, graph)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/fitlins/interfaces/bids.py", line 258, in _load_graph
specs = node.run(inputs, group_by=node.group_by, **filters)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 464, in run
node_output = BIDSStatsModelsNodeOutput(
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 581, in __init__
dfs = self._collections_to_dfs(collections)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/statsmodels.py", line 657, in _collections_to_dfs
coll = transformer.transform(coll.clone(), transformations['instructions'])
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 465, in transform
func(collection, cols, **kwargs)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 97, in __new__
return t.transform()
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/base.py", line 271, in transform
result = self._transform(data[i], **kwargs)
File "/opt/miniconda-latest/envs/neuro/lib/python3.9/site-packages/bids/modeling/transformations/munge.py", line 39, in _transform
target = self.collection.variables[target].clone()
TypeError: unhashable type: 'list'
``` | bids-standard/pybids | diff --git a/bids/modeling/tests/test_transformations.py b/bids/modeling/tests/test_transformations.py
index 91b15506..4415fd6e 100644
--- a/bids/modeling/tests/test_transformations.py
+++ b/bids/modeling/tests/test_transformations.py
@@ -344,6 +344,20 @@ def test_assign(collection):
assert np.array_equal(t2.duration, pg.duration)
+def test_assign_multiple(collection):
+ # test kwarg distribution
+ transform.Assign(collection, ['RT', 'respcat'], target=['gain', 'loss'],
+ input_attr=['amplitude', 'amplitude'], target_attr=['duration', 'amplitude'],
+ output=['gain_rt', 'loss_cat'])
+ rt = collection['RT']
+ gain_rt = collection['gain_rt']
+ loss_cat = collection['loss_cat']
+ rc = collection['respcat']
+
+ assert np.array_equal(gain_rt.duration, rt.values.values)
+ assert np.array_equal(loss_cat.values.values, rc.values.values)
+
+
def test_copy(collection):
transform.Copy(collection, 'RT', output='RT_copy')
assert 'RT_copy' in collection.variables.keys()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 3
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astor==0.8.1
attrs==25.3.0
bids-validator==1.14.7.post0
bidsschematools==1.0.4
click==8.1.8
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
formulaic==0.3.4
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
interface-meta==1.3.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
nibabel==5.3.2
num2words==0.5.14
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/bids-standard/pybids.git@37dc6eadfdcfbbf61ad930710fb703a6b48cd49c#egg=pybids
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
rpds-py==0.24.0
scipy==1.13.1
six==1.17.0
SQLAlchemy==1.3.24
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
tzdata==2025.2
wrapt==1.17.2
zipp==3.21.0
| name: pybids
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astor==0.8.1
- attrs==25.3.0
- bids-validator==1.14.7.post0
- bidsschematools==1.0.4
- click==8.1.8
- docopt==0.6.2
- formulaic==0.3.4
- importlib-resources==6.5.2
- interface-meta==1.3.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- nibabel==5.3.2
- num2words==0.5.14
- numpy==2.0.2
- pandas==2.2.3
- pybids==0.15.1.post0.dev85
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- rpds-py==0.24.0
- scipy==1.13.1
- six==1.17.0
- sqlalchemy==1.3.24
- typing-extensions==4.13.0
- tzdata==2025.2
- wrapt==1.17.2
- zipp==3.21.0
prefix: /opt/conda/envs/pybids
| [
"bids/modeling/tests/test_transformations.py::test_assign_multiple"
] | [
"bids/modeling/tests/test_transformations.py::test_orthogonalize_dense",
"bids/modeling/tests/test_transformations.py::test_orthogonalize_sparse"
] | [
"bids/modeling/tests/test_transformations.py::test_convolve",
"bids/modeling/tests/test_transformations.py::test_convolve_impulse",
"bids/modeling/tests/test_transformations.py::test_rename",
"bids/modeling/tests/test_transformations.py::test_product",
"bids/modeling/tests/test_transformations.py::test_sum",
"bids/modeling/tests/test_transformations.py::test_scale",
"bids/modeling/tests/test_transformations.py::test_demean",
"bids/modeling/tests/test_transformations.py::test_split",
"bids/modeling/tests/test_transformations.py::test_resample_dense",
"bids/modeling/tests/test_transformations.py::test_threshold",
"bids/modeling/tests/test_transformations.py::test_assign",
"bids/modeling/tests/test_transformations.py::test_copy",
"bids/modeling/tests/test_transformations.py::test_expand_variable_names",
"bids/modeling/tests/test_transformations.py::test_factor",
"bids/modeling/tests/test_transformations.py::test_filter",
"bids/modeling/tests/test_transformations.py::test_replace",
"bids/modeling/tests/test_transformations.py::test_select",
"bids/modeling/tests/test_transformations.py::test_delete",
"bids/modeling/tests/test_transformations.py::test_and",
"bids/modeling/tests/test_transformations.py::test_or",
"bids/modeling/tests/test_transformations.py::test_not",
"bids/modeling/tests/test_transformations.py::test_dropna",
"bids/modeling/tests/test_transformations.py::test_group",
"bids/modeling/tests/test_transformations.py::test_resample",
"bids/modeling/tests/test_transformations.py::test_Lag"
] | [] | MIT License | 12,604 | 884 | [
"bids/modeling/transformations/base.py",
"bids/modeling/transformations/compute.py",
"bids/modeling/transformations/munge.py"
] |
|
networkx__networkx-5523 | b79768389070c5533a5ae21afce15dd06cd2cff0 | 2022-04-12 17:38:58 | c464814ed02635698eafb8bb8b567f778978dd14 | diff --git a/networkx/algorithms/planarity.py b/networkx/algorithms/planarity.py
index 4d1441efc..8f4b29096 100644
--- a/networkx/algorithms/planarity.py
+++ b/networkx/algorithms/planarity.py
@@ -24,6 +24,18 @@ def check_planarity(G, counterexample=False):
If the graph is planar `certificate` is a PlanarEmbedding
otherwise it is a Kuratowski subgraph.
+ Examples
+ --------
+ >>> G = nx.Graph([(0, 1), (0, 2)])
+ >>> is_planar, P = nx.check_planarity(G)
+ >>> print(is_planar)
+ True
+
+ When `G` is planar, a `PlanarEmbedding` instance is returned:
+
+ >>> P.get_data()
+ {0: [1, 2], 1: [0], 2: [0]}
+
Notes
-----
A (combinatorial) embedding consists of cyclic orderings of the incident
@@ -716,6 +728,8 @@ class PlanarEmbedding(nx.DiGraph):
The planar embedding is given by a `combinatorial embedding
<https://en.wikipedia.org/wiki/Graph_embedding#Combinatorial_embedding>`_.
+ .. note:: `check_planarity` is the preferred way to check if a graph is planar.
+
**Neighbor ordering:**
In comparison to a usual graph structure, the embedding also stores the
@@ -761,6 +775,13 @@ class PlanarEmbedding(nx.DiGraph):
For a half-edge (u, v) that is orientated such that u is below v then the
face that belongs to (u, v) is to the right of this half-edge.
+ See Also
+ --------
+ is _planar :
+ Preferred way to check if an existing graph is planar.
+ check_planarity :
+ A convenient way to create a `PlanarEmbedding`. If not planar, it returns a subgraph that shows this.
+
Examples
--------
diff --git a/networkx/algorithms/similarity.py b/networkx/algorithms/similarity.py
index d5f463536..2447a9dce 100644
--- a/networkx/algorithms/similarity.py
+++ b/networkx/algorithms/similarity.py
@@ -745,18 +745,30 @@ def optimize_edit_paths(
N = len(pending_h)
# assert Ce.C.shape == (M + N, M + N)
- g_ind = [
- i
- for i in range(M)
- if pending_g[i][:2] == (u, u)
- or any(pending_g[i][:2] in ((p, u), (u, p)) for p, q in matched_uv)
- ]
- h_ind = [
- j
- for j in range(N)
- if pending_h[j][:2] == (v, v)
- or any(pending_h[j][:2] in ((q, v), (v, q)) for p, q in matched_uv)
- ]
+ # only attempt to match edges after one node match has been made
+ # this will stop self-edges on the first node being automatically deleted
+ # even when a substitution is the better option
+ if matched_uv:
+ g_ind = [
+ i
+ for i in range(M)
+ if pending_g[i][:2] == (u, u)
+ or any(
+ pending_g[i][:2] in ((p, u), (u, p), (p, p)) for p, q in matched_uv
+ )
+ ]
+ h_ind = [
+ j
+ for j in range(N)
+ if pending_h[j][:2] == (v, v)
+ or any(
+ pending_h[j][:2] in ((q, v), (v, q), (q, q)) for p, q in matched_uv
+ )
+ ]
+ else:
+ g_ind = []
+ h_ind = []
+
m = len(g_ind)
n = len(h_ind)
@@ -782,9 +794,9 @@ def optimize_edit_paths(
for p, q in matched_uv
):
continue
- if g == (u, u):
+ if g == (u, u) or any(g == (p, p) for p, q in matched_uv):
continue
- if h == (v, v):
+ if h == (v, v) or any(h == (q, q) for p, q in matched_uv):
continue
C[k, l] = inf
diff --git a/networkx/algorithms/tree/recognition.py b/networkx/algorithms/tree/recognition.py
index 5fbff544e..52da959b6 100644
--- a/networkx/algorithms/tree/recognition.py
+++ b/networkx/algorithms/tree/recognition.py
@@ -157,6 +157,21 @@ def is_forest(G):
b : bool
A boolean that is True if `G` is a forest.
+ Raises
+ ------
+ NetworkXPointlessConcept
+ If `G` is empty.
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
+ >>> nx.is_forest(G)
+ True
+ >>> G.add_edge(4, 1)
+ >>> nx.is_forest(G)
+ False
+
Notes
-----
In another convention, a directed forest is known as a *polyforest* and
@@ -198,6 +213,21 @@ def is_tree(G):
b : bool
A boolean that is True if `G` is a tree.
+ Raises
+ ------
+ NetworkXPointlessConcept
+ If `G` is empty.
+
+ Examples
+ --------
+ >>> G = nx.Graph()
+ >>> G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])
+ >>> nx.is_tree(G) # n-1 edges
+ True
+ >>> G.add_edge(3, 4)
+ >>> nx.is_tree(G) # n edges
+ False
+
Notes
-----
In another convention, a directed tree is known as a *polytree* and then
diff --git a/networkx/utils/decorators.py b/networkx/utils/decorators.py
index e8e4bff55..4e065c50b 100644
--- a/networkx/utils/decorators.py
+++ b/networkx/utils/decorators.py
@@ -464,6 +464,9 @@ class argmap:
function constructs an object (like a file handle) that requires
post-processing (like closing).
+ Note: try_finally decorators cannot be used to decorate generator
+ functions.
+
Examples
--------
Most of these examples use `@argmap(...)` to apply the decorator to
@@ -606,6 +609,38 @@ class argmap:
# this code doesn't need to worry about closing the file
print(file.read())
+ Decorators with try_finally = True cannot be used with generator functions,
+ because the `finally` block is evaluated before the generator is exhausted::
+
+ @argmap(open_file, "file", try_finally=True)
+ def file_to_lines(file):
+ for line in file.readlines():
+ yield line
+
+ is equivalent to::
+
+ def file_to_lines_wrapped(file):
+ for line in file.readlines():
+ yield line
+
+ def file_to_lines_wrapper(file):
+ try:
+ file = open_file(file)
+ return file_to_lines_wrapped(file)
+ finally:
+ file.close()
+
+ which behaves similarly to::
+
+ def file_to_lines_whoops(file):
+ file = open_file(file)
+ file.close()
+ for line in file.readlines():
+ yield line
+
+ because the `finally` block of `file_to_lines_wrapper` is executed before
+ the caller has a chance to exhaust the iterator.
+
Notes
-----
An object of this class is callable and intended to be used when
@@ -805,15 +840,8 @@ class argmap:
argmap._lazy_compile
"""
- if inspect.isgeneratorfunction(f):
-
- def func(*args, __wrapper=None, **kwargs):
- yield from argmap._lazy_compile(__wrapper)(*args, **kwargs)
-
- else:
-
- def func(*args, __wrapper=None, **kwargs):
- return argmap._lazy_compile(__wrapper)(*args, **kwargs)
+ def func(*args, __wrapper=None, **kwargs):
+ return argmap._lazy_compile(__wrapper)(*args, **kwargs)
# standard function-wrapping stuff
func.__name__ = f.__name__
@@ -843,6 +871,14 @@ class argmap:
# this is used to variously call self.assemble and self.compile
func.__argmap__ = self
+ if hasattr(f, "__argmap__"):
+ func.__is_generator = f.__is_generator
+ else:
+ func.__is_generator = inspect.isgeneratorfunction(f)
+
+ if self._finally and func.__is_generator:
+ raise nx.NetworkXError("argmap cannot decorate generators with try_finally")
+
return func
__count = 0
@@ -1162,12 +1198,7 @@ class argmap:
fname = cls._name(f)
def_sig = f'def {fname}({", ".join(def_sig)}):'
- if inspect.isgeneratorfunction(f):
- _return = "yield from"
- else:
- _return = "return"
-
- call_sig = f"{_return} {{}}({', '.join(call_sig)})"
+ call_sig = f"return {{}}({', '.join(call_sig)})"
return cls.Signature(fname, sig, def_sig, call_sig, names, npos, args, kwargs)
| Update documentation for planar embedding
Let's update the documentation to make it clear that the `check_planarity` function is the primary interface for the planar embedding tools. Also, it is tricky to make sure the `PlanarEmbedding` class maintains the planar data structure, so people not familiar with those ideas should definitely start with the `check_planarity` function.
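For readers unfamiliar with that entry point, a minimal usage sketch (the graphs are illustrative, not taken from the issue or the patch):
```
import networkx as nx

# Planar case: check_planarity returns (True, PlanarEmbedding).
G = nx.Graph([(0, 1), (0, 2), (1, 2)])
is_planar, embedding = nx.check_planarity(G)
print(is_planar)             # True
print(embedding.get_data())  # cyclic ordering of neighbors per node

# Non-planar case: with counterexample=True a Kuratowski subgraph is returned instead.
K5 = nx.complete_graph(5)
is_planar, kuratowski = nx.check_planarity(K5, counterexample=True)
print(is_planar)             # False
print(kuratowski.number_of_edges())
```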
See discussion in #5079 | networkx/networkx | diff --git a/networkx/algorithms/tests/test_similarity.py b/networkx/algorithms/tests/test_similarity.py
index c4fd17f12..9b620dece 100644
--- a/networkx/algorithms/tests/test_similarity.py
+++ b/networkx/algorithms/tests/test_similarity.py
@@ -894,3 +894,27 @@ class TestSimilarity:
assert expected_paths == list(paths)
assert expected_map == index_map
+
+ def test_symmetry_with_custom_matching(self):
+ print("G2 is edge (a,b) and G3 is edge (a,a)")
+ print("but node order for G2 is (a,b) while for G3 it is (b,a)")
+
+ a, b = "A", "B"
+ G2 = nx.Graph()
+ G2.add_nodes_from((a, b))
+ G2.add_edges_from([(a, b)])
+ G3 = nx.Graph()
+ G3.add_nodes_from((b, a))
+ G3.add_edges_from([(a, a)])
+ for G in (G2, G3):
+ for n in G:
+ G.nodes[n]["attr"] = n
+ for e in G.edges:
+ G.edges[e]["attr"] = e
+ match = lambda x, y: x == y
+
+ print("Starting G2 to G3 GED calculation")
+ assert nx.graph_edit_distance(G2, G3, node_match=match, edge_match=match) == 1
+
+ print("Starting G3 to G2 GED calculation")
+ assert nx.graph_edit_distance(G3, G2, node_match=match, edge_match=match) == 1
diff --git a/networkx/utils/tests/test_decorators.py b/networkx/utils/tests/test_decorators.py
index eee48fd48..3be29e549 100644
--- a/networkx/utils/tests/test_decorators.py
+++ b/networkx/utils/tests/test_decorators.py
@@ -380,29 +380,25 @@ class TestArgmap:
# context exits are called in reverse
assert container == ["c", "b", "a"]
- def test_contextmanager_iterator(self):
+ def test_tryfinally_generator(self):
container = []
- def contextmanager(x):
- nonlocal container
- return x, lambda: container.append(x)
+ def singleton(x):
+ return (x,)
- @argmap(contextmanager, 0, 1, 2, try_finally=True)
+ with pytest.raises(nx.NetworkXError):
+
+ @argmap(singleton, 0, 1, 2, try_finally=True)
+ def foo(x, y, z):
+ yield from (x, y, z)
+
+ @argmap(singleton, 0, 1, 2)
def foo(x, y, z):
- yield from (x, y, z)
+ return x + y + z
q = foo("a", "b", "c")
- assert next(q) == "a"
- assert container == []
- assert next(q) == "b"
- assert container == []
- assert next(q) == "c"
- assert container == []
- with pytest.raises(StopIteration):
- next(q)
- # context exits are called in reverse
- assert container == ["c", "b", "a"]
+ assert q == ("a", "b", "c")
def test_actual_vararg(self):
@argmap(lambda x: -x, 4)
@@ -485,7 +481,25 @@ finally:
pass#"""
)
- def nop(self):
- print(foo.__argmap__.assemble(foo.__wrapped__))
- argmap._lazy_compile(foo)
- print(foo._code)
+ def test_immediate_raise(self):
+ @not_implemented_for("directed")
+ def yield_nodes(G):
+ yield from G
+
+ G = nx.Graph([(1, 2)])
+ D = nx.DiGraph()
+
+ # test first call (argmap is compiled and executed)
+ with pytest.raises(nx.NetworkXNotImplemented):
+ node_iter = yield_nodes(D)
+
+ # test second call (argmap is only executed)
+ with pytest.raises(nx.NetworkXNotImplemented):
+ node_iter = yield_nodes(D)
+
+ # ensure that generators still make generators
+ node_iter = yield_nodes(G)
+ next(node_iter)
+ next(node_iter)
+ with pytest.raises(StopIteration):
+ next(node_iter)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 4
} | 2.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[default]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=7.1",
"pytest-cov>=3.0",
"codecov>=2.1",
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements/default.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
contourpy==1.3.0
coverage==7.8.0
cycler==0.12.1
exceptiongroup==1.2.2
fonttools==4.56.0
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
matplotlib==3.9.4
-e git+https://github.com/networkx/networkx.git@b79768389070c5533a5ae21afce15dd06cd2cff0#egg=networkx
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pillow==11.1.0
pluggy==1.5.0
pyparsing==3.2.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
scipy==1.13.1
six==1.17.0
tomli==2.2.1
tzdata==2025.2
urllib3==2.3.0
zipp==3.21.0
| name: networkx
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- contourpy==1.3.0
- coverage==7.8.0
- cycler==0.12.1
- exceptiongroup==1.2.2
- fonttools==4.56.0
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- matplotlib==3.9.4
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pillow==11.1.0
- pluggy==1.5.0
- pyparsing==3.2.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/networkx
| [
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_symmetry_with_custom_matching",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_tryfinally_generator",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_immediate_raise"
] | [] | [
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_roots_and_timeout",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_node_match",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_edge_match",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_node_cost",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_edge_cost",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_graph_edit_distance_upper_bound",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_optimal_edit_paths",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_optimize_graph_edit_distance",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_selfloops",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_digraph",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_multigraph",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_multidigraph",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testCopy",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testSame",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testOneEdgeLabelDiff",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testOneNodeLabelDiff",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testOneExtraNode",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testOneExtraEdge",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testOneExtraNodeAndEdge",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph1",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph2",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph3",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph4",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph4_a",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::testGraph4_b",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_no_source_no_target[simrank_similarity]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_no_source_no_target[_simrank_similarity_python]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_source_no_target[simrank_similarity]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_source_no_target[_simrank_similarity_python]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_noninteger_nodes[simrank_similarity]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_noninteger_nodes[_simrank_similarity_python]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_source_and_target[simrank_similarity]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_source_and_target[_simrank_similarity_python]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_max_iterations[simrank_similarity]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_max_iterations[_simrank_similarity_python]",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_between_versions",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_numpy_no_source_no_target",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_numpy_source_no_target",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_simrank_numpy_source_and_target",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_panther_similarity_unweighted",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_panther_similarity_weighted",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_generate_random_paths_unweighted",
"networkx/algorithms/tests/test_similarity.py::TestSimilarity::test_generate_random_paths_weighted",
"networkx/utils/tests/test_decorators.py::test_not_implemented_decorator",
"networkx/utils/tests/test_decorators.py::test_not_implemented_decorator_key",
"networkx/utils/tests/test_decorators.py::test_not_implemented_decorator_raise",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg0_str",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg0_fobj",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg0_pathlib",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg1_str",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg1_fobj",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg2default_str",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg2default_fobj",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg2default_fobj_path_none",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_arg4default_fobj",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_kwarg_str",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_kwarg_fobj",
"networkx/utils/tests/test_decorators.py::TestOpenFileDecorator::test_writer_kwarg_path_none",
"networkx/utils/tests/test_decorators.py::test_preserve_random_state",
"networkx/utils/tests/test_decorators.py::TestRandomState::test_random_state_None",
"networkx/utils/tests/test_decorators.py::TestRandomState::test_random_state_np_random",
"networkx/utils/tests/test_decorators.py::TestRandomState::test_random_state_int",
"networkx/utils/tests/test_decorators.py::TestRandomState::test_random_state_np_random_RandomState",
"networkx/utils/tests/test_decorators.py::TestRandomState::test_random_state_py_random",
"networkx/utils/tests/test_decorators.py::test_random_state_string_arg_index",
"networkx/utils/tests/test_decorators.py::test_py_random_state_string_arg_index",
"networkx/utils/tests/test_decorators.py::test_random_state_invalid_arg_index",
"networkx/utils/tests/test_decorators.py::test_py_random_state_invalid_arg_index",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_trivial_function",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_trivial_iterator",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_contextmanager",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_actual_vararg",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_signature_destroying_intermediate_decorator",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_actual_kwarg",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_nested_tuple",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_flatten",
"networkx/utils/tests/test_decorators.py::TestArgmap::test_indent"
] | [] | BSD 3-Clause | 12,611 | 2,387 | [
"networkx/algorithms/planarity.py",
"networkx/algorithms/similarity.py",
"networkx/algorithms/tree/recognition.py",
"networkx/utils/decorators.py"
] |
|
bids-standard__pybids-838 | 5ea3103b6dada5d3fd02cdec7d6498817d76f366 | 2022-04-12 20:53:53 | df945f499ab19ea5ee95b2cb3e0ec401984231da | diff --git a/bids/modeling/transformations/base.py b/bids/modeling/transformations/base.py
index 55588098..3c87f69f 100644
--- a/bids/modeling/transformations/base.py
+++ b/bids/modeling/transformations/base.py
@@ -313,8 +313,17 @@ class Transformation(metaclass=ABCMeta):
if self.output_suffix is not None:
_output += self.output_suffix
- col.name = _output
- self.collection[_output] = col
+ # If multiple variables were returned, add each one separately
+ if isinstance(result, (list, tuple)):
+ # rename first output
+ result[0].name = _output
+ self.collection[_output] = result[0]
+
+ for r in result[1:]:
+ self.collection[r.name] = r
+ else:
+ col.name = _output
+ self.collection[_output] = col
@abstractmethod
def _transform(self, **kwargs):
diff --git a/bids/modeling/transformations/compute.py b/bids/modeling/transformations/compute.py
index 9478f926..71877c06 100644
--- a/bids/modeling/transformations/compute.py
+++ b/bids/modeling/transformations/compute.py
@@ -39,7 +39,7 @@ class Convolve(Transformation):
-----
Uses the HRF convolution functions implemented in nistats.
"""
-
+ _groupable = False
_input_type = 'variable'
_return_type = 'variable'
@@ -93,9 +93,16 @@ class Convolve(Transformation):
oversampling=np.ceil(effective_sr / sampling_rate)
)
- return DenseRunVariable(
- name=var.name, values=convolved[0], run_info=var.run_info,
- source=var.source, sampling_rate=sampling_rate)
+ results = []
+ arr, names = convolved
+ for conv, name in zip(np.split(arr, arr.shape[1], axis=1), names):
+ new_name = '_'.join([var.name, name.split('_')[-1]]) if '_' in name else var.name
+ results.append(
+ DenseRunVariable(
+ name=new_name, values=conv, run_info=var.run_info,
+ source=var.source, sampling_rate=sampling_rate)
+ )
+ return results
class Demean(Transformation):
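An illustrative sketch of how the patched Convolve is exercised; it mirrors the accompanying test rather than documented API, and `collection` is assumed to be the test's run-level variable collection containing an 'RT' column:
```
from bids.modeling import transformations as transform

# Densify the sparse 'RT' variable, then convolve it with the HRF plus its
# temporal derivative. With this patch the derivative becomes its own variable
# instead of being silently dropped.
transform.ToDense(collection, ['RT'], output=['rt_dense'])
transform.Convolve(collection, 'rt_dense', derivative=True)

print('rt_dense' in collection.variables)             # convolved regressor
print('rt_dense_derivative' in collection.variables)  # derivative regressor (new)
```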
| Convolve can return multiple regressors (not just one)
Currently the implementation of Convolve assumes the hrf function returns an array with only one axis expected to have values, but if 'glover + derivatives' is included, I would expect two columns to be created (orig and orig_derivative). | bids-standard/pybids | diff --git a/bids/modeling/tests/test_transformations.py b/bids/modeling/tests/test_transformations.py
index e79ee6a0..91b15506 100644
--- a/bids/modeling/tests/test_transformations.py
+++ b/bids/modeling/tests/test_transformations.py
@@ -58,18 +58,22 @@ def sparse_run_variable_with_missing_values():
def test_convolve(collection):
rt = collection.variables['RT']
- transform.Convolve(collection, 'RT', output='reaction_time')
+ transform.Convolve(collection, ['RT'], output=['reaction_time'])
rt_conv = collection.variables['reaction_time']
assert rt_conv.values.shape[0] == \
rt.get_duration() * collection.sampling_rate
- transform.ToDense(collection, 'RT', output='rt_dense')
- transform.Convolve(collection, 'rt_dense', output='dense_convolved')
+ transform.ToDense(collection, ['RT'], output=['rt_dense'])
+ transform.Convolve(collection, 'rt_dense', derivative=True)
+
+ # test the derivative exists
+ assert collection.variables.get('rt_dense_derivative')
dense_conv = collection.variables['reaction_time']
assert dense_conv.values.shape[0] == \
+ collection.variables['rt_dense_derivative'].values.shape[0] == \
rt.get_duration() * collection.sampling_rate
# Test adapative oversampling computation
@@ -77,34 +81,40 @@ def test_convolve(collection):
# To resolve 1Hz frequencies, we must sample at >=2Hz
args = (mock.ANY, 'spm', mock.ANY)
kwargs = dict(fir_delays=None, min_onset=0)
- with mock.patch('bids.modeling.transformations.compute.hrf') as mocked:
+ mock_return = (np.array([[0, 0]]), ["cond"])
+ with mock.patch('bids.modeling.transformations.compute.hrf.compute_regressor') as mocked:
+ mocked.return_value = mock_return
# Sampling rate is 10Hz, no oversampling needed
transform.Convolve(collection, 'RT', output='rt_mock')
- mocked.compute_regressor.assert_called_with(*args, oversampling=1.0, **kwargs)
+ mocked.assert_called_with(*args, oversampling=1.0, **kwargs)
- with mock.patch('bids.modeling.transformations.compute.hrf') as mocked:
+ with mock.patch('bids.modeling.transformations.compute.hrf.compute_regressor') as mocked:
+ mocked.return_value = mock_return
# Sampling rate is 10Hz, no oversampling needed
transform.Convolve(collection, 'rt_dense', output='rt_mock')
- mocked.compute_regressor.assert_called_with(*args, oversampling=1.0, **kwargs)
+ mocked.assert_called_with(*args, oversampling=1.0, **kwargs)
- with mock.patch('bids.modeling.transformations.compute.hrf') as mocked:
+ with mock.patch('bids.modeling.transformations.compute.hrf.compute_regressor') as mocked:
+ mocked.return_value = mock_return
# Slow sampling rate, oversample (4x) to 2Hz
collection.sampling_rate = 0.5
transform.Convolve(collection, 'RT', output='rt_mock')
- mocked.compute_regressor.assert_called_with(*args, oversampling=4.0, **kwargs)
+ mocked.assert_called_with(*args, oversampling=4.0, **kwargs)
- with mock.patch('bids.modeling.transformations.compute.hrf') as mocked:
+ with mock.patch('bids.modeling.transformations.compute.hrf.compute_regressor') as mocked:
+ mocked.return_value = mock_return
# Dense variable is already sampled at 10Hz, no oversampling needed
collection.sampling_rate = 0.5
transform.Convolve(collection, 'rt_dense', output='rt_mock')
- mocked.compute_regressor.assert_called_with(*args, oversampling=1.0, **kwargs)
+ mocked.assert_called_with(*args, oversampling=1.0, **kwargs)
- with mock.patch('bids.modeling.transformations.compute.hrf') as mocked:
+ with mock.patch('bids.modeling.transformations.compute.hrf.compute_regressor') as mocked:
+ mocked.return_value = mock_return
# Onset requires 10Hz resolution, oversample (2x) to 20Hz
collection.sampling_rate = 10
collection['RT'].onset[0] += 0.1
transform.Convolve(collection, 'RT', output='rt_mock')
- mocked.compute_regressor.assert_called_with(*args, oversampling=2.0, **kwargs)
+ mocked.assert_called_with(*args, oversampling=2.0, **kwargs)
def test_convolve_impulse():
@@ -126,7 +136,7 @@ def test_convolve_impulse():
def test_rename(collection):
dense_rt = collection.variables['RT'].to_dense(collection.sampling_rate)
assert len(dense_rt.values) == math.ceil(len(SUBJECTS) * NRUNS * SCAN_LENGTH * collection.sampling_rate)
- transform.Rename(collection, 'RT', output='reaction_time')
+ transform.Rename(collection, ['RT'], output=['reaction_time'])
assert 'reaction_time' in collection.variables
assert 'RT' not in collection.variables
col = collection.variables['reaction_time']
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 0.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
astor==0.8.1
attrs==25.3.0
babel==2.17.0
bids-validator==1.14.7.post0
bidsschematools==1.0.4
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
docopt==0.6.2
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
formulaic==0.3.4
graphviz==0.20.3
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
interface-meta==1.3.0
Jinja2==3.1.6
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
MarkupSafe==3.0.2
nibabel==5.3.2
num2words==0.5.14
numpy==2.0.2
numpydoc==1.8.0
packaging @ file:///croot/packaging_1734472117206/work
pandas==2.2.3
pluggy @ file:///croot/pluggy_1733169602837/work
-e git+https://github.com/bids-standard/pybids.git@5ea3103b6dada5d3fd02cdec7d6498817d76f366#egg=pybids
Pygments==2.19.1
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
requests==2.32.3
rpds-py==0.24.0
scipy==1.13.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
SQLAlchemy==1.3.24
tabulate==0.9.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
wrapt==1.17.2
zipp==3.21.0
| name: pybids
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- astor==0.8.1
- attrs==25.3.0
- babel==2.17.0
- bids-validator==1.14.7.post0
- bidsschematools==1.0.4
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- docopt==0.6.2
- docutils==0.21.2
- formulaic==0.3.4
- graphviz==0.20.3
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- interface-meta==1.3.0
- jinja2==3.1.6
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- markupsafe==3.0.2
- nibabel==5.3.2
- num2words==0.5.14
- numpy==2.0.2
- numpydoc==1.8.0
- pandas==2.2.3
- pybids==0.15.1
- pygments==2.19.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- requests==2.32.3
- rpds-py==0.24.0
- scipy==1.13.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- sqlalchemy==1.3.24
- tabulate==0.9.0
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- wrapt==1.17.2
- zipp==3.21.0
prefix: /opt/conda/envs/pybids
| [
"bids/modeling/tests/test_transformations.py::test_convolve"
] | [
"bids/modeling/tests/test_transformations.py::test_orthogonalize_dense",
"bids/modeling/tests/test_transformations.py::test_orthogonalize_sparse"
] | [
"bids/modeling/tests/test_transformations.py::test_convolve_impulse",
"bids/modeling/tests/test_transformations.py::test_rename",
"bids/modeling/tests/test_transformations.py::test_product",
"bids/modeling/tests/test_transformations.py::test_sum",
"bids/modeling/tests/test_transformations.py::test_scale",
"bids/modeling/tests/test_transformations.py::test_demean",
"bids/modeling/tests/test_transformations.py::test_split",
"bids/modeling/tests/test_transformations.py::test_resample_dense",
"bids/modeling/tests/test_transformations.py::test_threshold",
"bids/modeling/tests/test_transformations.py::test_assign",
"bids/modeling/tests/test_transformations.py::test_copy",
"bids/modeling/tests/test_transformations.py::test_expand_variable_names",
"bids/modeling/tests/test_transformations.py::test_factor",
"bids/modeling/tests/test_transformations.py::test_filter",
"bids/modeling/tests/test_transformations.py::test_replace",
"bids/modeling/tests/test_transformations.py::test_select",
"bids/modeling/tests/test_transformations.py::test_delete",
"bids/modeling/tests/test_transformations.py::test_and",
"bids/modeling/tests/test_transformations.py::test_or",
"bids/modeling/tests/test_transformations.py::test_not",
"bids/modeling/tests/test_transformations.py::test_dropna",
"bids/modeling/tests/test_transformations.py::test_group",
"bids/modeling/tests/test_transformations.py::test_resample",
"bids/modeling/tests/test_transformations.py::test_Lag"
] | [] | MIT License | 12,613 | 580 | [
"bids/modeling/transformations/base.py",
"bids/modeling/transformations/compute.py"
] |
|
juju__python-libjuju-666 | 7d2a423e3180230421ccfa16c309f3c2d0ac1cc2 | 2022-04-12 21:50:47 | 32ce25036d0482f616208470348a4b60d762785b | cderici: !!build!!
cderici: $$merge$$ | diff --git a/juju/constraints.py b/juju/constraints.py
index 3191f28..5c141ed 100644
--- a/juju/constraints.py
+++ b/juju/constraints.py
@@ -32,6 +32,25 @@ FACTORS = {
"Y": 1024 ** 6
}
+# List of supported constraint keys, see
+# http://github.com/cderici/juju/blob/2.9/core/constraints/constraints.go#L20-L39
+SUPPORTED_KEYS = [
+ "arch",
+ "container",
+ "cpu_cores",
+ "cores",
+ "cpu_power",
+ "mem",
+ "root_disk",
+ "root_disk_source",
+ "tags",
+ "instance_role",
+ "instance_type",
+ "spaces",
+ "virt_type",
+ "zones",
+ "allocate_public_ip"]
+
LIST_KEYS = {'tags', 'spaces'}
SNAKE1 = re.compile(r'(.)([A-Z][a-z]+)')
@@ -51,17 +70,20 @@ def parse(constraints):
# Fowards compatibilty: already parsed
return constraints
- constraints = {
- normalize_key(k): (
- normalize_list_value(v) if k in LIST_KEYS else
- normalize_value(v)
- ) for k, v in [s.split("=") for s in constraints.split(" ")]}
+ normalized_constraints = {}
+ for s in constraints.split(" "):
+ if "=" not in s:
+ raise Exception("malformed constraint %s" % s)
+
+ k, v = s.split("=")
+ normalized_constraints[normalize_key(k)] = normalize_list_value(v) if\
+ k in LIST_KEYS else normalize_value(v)
- return constraints
+ return normalized_constraints
-def normalize_key(key):
- key = key.strip()
+def normalize_key(orig_key):
+ key = orig_key.strip()
key = key.replace("-", "_") # Our _client lib wants "_" in place of "-"
@@ -69,6 +91,8 @@ def normalize_key(key):
key = SNAKE1.sub(r'\1_\2', key)
key = SNAKE2.sub(r'\1_\2', key).lower()
+ if key not in SUPPORTED_KEYS:
+ raise Exception("unknown constraint in %s" % orig_key)
return key
| Inconsistent handling of constraints in juju cli and python-libjuju
When trying to limit a charm to only deploy to machines with a minimum amount of disk space, I noticed that the juju command line and python-libjuju validate the syntax of constraints differently. This happened in a (wrong) attempt to use a relational ">=".
In the juju CLI, we got the error as expected:
jenkins@juju-38dff2-0:~$ juju deploy ubuntu --constraints 'tags=scalebot-dev-controller arch=amd64 root-disk>=16G'
ERROR unknown constraint "root-disk>"
jenkins@juju-38dff2-0:~$ juju deploy ubuntu --constraints 'tags=scalebot-dev-controller arch=amd64 root-disk=16G'
Located charm "cs:ubuntu-18".
Deploying charm "cs:ubuntu-18".
jenkins@juju-38dff2-0:~$ juju deploy ubuntu --constraints 'tags=scalebot-dev-controller arch=amd64 root-disk>16G'
ERROR malformed constraint "root-disk>16G"
jenkins@juju-38dff2-0:~$
With python-libjuju 2.9.7:
Python 3.8.0 (default, Dec 9 2021, 17:53:27)
[GCC 8.4.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import juju
>>> import juju.constraints
>>> juju.constraints.parse('tags=scalebot-dev-controller arch=amd64 root-disk>=16G')
{'tags': ['scalebot-dev-controller'], 'arch': 'amd64', 'root_disk>': 16384}
>>>
If we try to deploy using the API with something similar to:
application = await model.deploy(
"ubuntu",
application_name="ubuntu",
series=str_series,
channel="stable",
constraints=constraints.parse(str_constraints),
)
... the invalid constraint 'root_disk>' will be sent to MAAS, having no effect but also raising no exception. I only tested with MAAS, but other cloud providers may also fail to parse invalid constraints.
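Based on the patch above, a short sketch of the behaviour the fix introduces (the constraint strings are illustrative):
```
from juju import constraints

# Well-formed constraints are still normalized into a dict.
constraints.parse("mem=10G root-disk=16G tags=scalebot-dev-controller")
# -> {'mem': 10240, 'root_disk': 16384, 'tags': ['scalebot-dev-controller']}

# "root-disk>=16G" splits into the key "root-disk>", which is not in
# SUPPORTED_KEYS, so parse() now raises instead of passing a bogus key
# on to the cloud provider.
try:
    constraints.parse("root-disk>=16G")
except Exception as err:
    print(err)  # unknown constraint in root-disk>

# A token with no "=" at all is reported as malformed, matching the CLI.
try:
    constraints.parse("root-disk>16G")
except Exception as err:
    print(err)  # malformed constraint root-disk>16G
```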
| juju/python-libjuju | diff --git a/tests/unit/test_constraints.py b/tests/unit/test_constraints.py
index f76f516..244883e 100644
--- a/tests/unit/test_constraints.py
+++ b/tests/unit/test_constraints.py
@@ -20,11 +20,13 @@ class TestConstraints(unittest.TestCase):
def test_normalize_key(self):
_ = constraints.normalize_key
- self.assertEqual(_("test-key"), "test_key")
- self.assertEqual(_("test-key "), "test_key")
- self.assertEqual(_(" test-key"), "test_key")
- self.assertEqual(_("TestKey"), "test_key")
- self.assertEqual(_("testKey"), "test_key")
+ self.assertEqual(_("root-disk"), "root_disk")
+ self.assertEqual(_("root-disk "), "root_disk")
+ self.assertEqual(_(" root-disk"), "root_disk")
+ self.assertEqual(_("RootDisk"), "root_disk")
+ self.assertEqual(_("rootDisk"), "root_disk")
+
+ self.assertRaises(Exception, lambda: _("not-one-of-the-supported-keys"))
def test_normalize_val(self):
_ = constraints.normalize_value
@@ -53,13 +55,16 @@ class TestConstraints(unittest.TestCase):
)
self.assertEqual(
- _("mem=10G foo=bar,baz tags=tag1 spaces=space1,space2"),
+ _("mem=10G zones=bar,baz tags=tag1 spaces=space1,space2"),
{"mem": 10 * 1024,
- "foo": "bar,baz",
+ "zones": "bar,baz",
"tags": ["tag1"],
"spaces": ["space1", "space2"]}
)
+ self.assertRaises(Exception, lambda: _("root-disk>16G"))
+ self.assertRaises(Exception, lambda: _("root-disk>=16G"))
+
def test_parse_storage_constraint(self):
_ = constraints.parse_storage_constraint
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 2.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-asyncio",
"pytest-xdist"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | bcrypt==4.3.0
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
cryptography==44.0.2
durationpy==0.9
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
google-auth==2.38.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/juju/python-libjuju.git@7d2a423e3180230421ccfa16c309f3c2d0ac1cc2#egg=juju
jujubundlelib==0.5.7
kubernetes==32.0.1
macaroonbakery==1.3.4
mypy-extensions==1.0.0
oauthlib==3.2.2
packaging @ file:///croot/packaging_1734472117206/work
paramiko==2.12.0
pluggy @ file:///croot/pluggy_1733169602837/work
protobuf==6.30.2
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycparser==2.22
pymacaroons==0.13.0
PyNaCl==1.5.0
pyRFC3339==1.1
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-xdist==3.6.1
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
theblues==0.5.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
toposort==1.10
typing-inspect==0.9.0
typing_extensions==4.13.0
urllib3==2.3.0
websocket-client==1.8.0
websockets==8.1
| name: python-libjuju
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- bcrypt==4.3.0
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- cryptography==44.0.2
- durationpy==0.9
- execnet==2.1.1
- google-auth==2.38.0
- idna==3.10
- jujubundlelib==0.5.7
- kubernetes==32.0.1
- macaroonbakery==1.3.4
- mypy-extensions==1.0.0
- oauthlib==3.2.2
- paramiko==2.12.0
- protobuf==6.30.2
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycparser==2.22
- pymacaroons==0.13.0
- pynacl==1.5.0
- pyrfc3339==1.1
- pytest-asyncio==0.26.0
- pytest-xdist==3.6.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0
- requests==2.32.3
- requests-oauthlib==2.0.0
- rsa==4.9
- six==1.17.0
- theblues==0.5.2
- toposort==1.10
- typing-extensions==4.13.0
- typing-inspect==0.9.0
- urllib3==2.3.0
- websocket-client==1.8.0
- websockets==8.1
prefix: /opt/conda/envs/python-libjuju
| [
"tests/unit/test_constraints.py::TestConstraints::test_normalize_key",
"tests/unit/test_constraints.py::TestConstraints::test_parse_constraints"
] | [] | [
"tests/unit/test_constraints.py::TestConstraints::test_mem_regex",
"tests/unit/test_constraints.py::TestConstraints::test_normalize_list_val",
"tests/unit/test_constraints.py::TestConstraints::test_normalize_val",
"tests/unit/test_constraints.py::TestConstraints::test_parse_device_constraint",
"tests/unit/test_constraints.py::TestConstraints::test_parse_storage_constraint"
] | [] | Apache License 2.0 | 12,614 | 556 | [
"juju/constraints.py"
] |
snowflakedb__snowflake-connector-python-1094 | 7bb60da3b48edede5b636d449e5291acdc768534 | 2022-04-14 18:26:26 | 28caf1fb78a742739eedea14cfd4111592dd4e14 | github-actions[bot]: **CLA Assistant Lite bot:** <br/>Thank you for your submission, we really appreciate it. Like many open-source projects, we ask that you sign our [Contributor License Agreement](https://github.com/Snowflake-Labs/CLA/blob/main/README.md) before we can accept your contribution. You can sign the CLA by just posting a Pull Request Comment same as the below format.<br/>
- - -
***I have read the CLA Document and I hereby sign the CLA***
- - -
<sub>You can retrigger this bot by commenting **recheck** in this Pull Request</sub>
sfc-gh-zblackwood: I have read the CLA Document and I hereby sign the CLA | diff --git a/src/snowflake/connector/connection.py b/src/snowflake/connector/connection.py
index 6387cdb0..f6f07031 100644
--- a/src/snowflake/connector/connection.py
+++ b/src/snowflake/connector/connection.py
@@ -268,18 +268,13 @@ class SnowflakeConnection:
self.heartbeat_thread = None
+ if "application" not in kwargs and ENV_VAR_PARTNER in os.environ.keys():
+ kwargs["application"] = os.environ[ENV_VAR_PARTNER]
+
self.converter = None
self.__set_error_attributes()
self.connect(**kwargs)
self._telemetry = TelemetryClient(self._rest)
- # Some configuration files need to be updated here to make them testable
- # E.g.: if DEFAULT_CONFIGURATION pulled in env variables these would be not testable
- if (
- self.application
- == DEFAULT_CONFIGURATION["application"][0] # still default value
- and ENV_VAR_PARTNER in os.environ.keys() # is defined as an env variable
- ):
- self._application = os.environ[ENV_VAR_PARTNER]
def __del__(self): # pragma: no cover
try:
| SNOW-576084: Setting environment variable SF_PARTNER doesn't send overridden `application` value in requests
Please answer these questions before submitting your issue. Thanks!
1. What version of Python are you using?
```
Python 3.9.11 (main, Apr 5 2022, 09:45:25)
[Clang 13.1.6 (clang-1316.0.21.2)]
```
2. What operating system and processor architecture are you using?
```
macOS-12.3.1-arm64-arm-64bit
```
3. What are the component versions in the environment (`pip freeze`)?
Testing locally
```
-e git+ssh://[email protected]/sfc-gh-zblackwood/snowflake-connector-python.git@0a3ad2de0fda656e8d5871c3eabf9ef6d618e190#egg=snowflake_connector_python
```
4. What did you do?
Here is the issue written as a test (note that the first assert succeeds, and the last fails)
```
@pytest.mark.skipolddriver
@patch("snowflake.connector.network.SnowflakeRestful._post_request")
def test_partner_env_var(mockSnowflakeRestfulPostRequest, capsys):
PARTNER_NAME = "Amanda"
def mock_post_request(url, headers, json_body, **kwargs):
global mock_cnt
print(json_body)
ret = None
if mock_cnt == 0:
# return from /v1/login-request
ret = {
"success": True,
"message": None,
"data": {
"token": "TOKEN",
"masterToken": "MASTER_TOKEN",
"idToken": None,
"parameters": [
{"name": "SERVICE_NAME", "value": "FAKE_SERVICE_NAME"}
],
},
}
return ret
# POST requests mock
mockSnowflakeRestfulPostRequest.side_effect = mock_post_request
global mock_cnt
mock_cnt = 0
with patch.dict(os.environ, {ENV_VAR_PARTNER: PARTNER_NAME}):
# connection
con = snowflake.connector.connect(
user="user",
account="account",
password="testpassword",
database="TESTDB",
warehouse="TESTWH",
)
assert con.application == PARTNER_NAME
# Check that the json body of the request that is made contains the new
# APPLICATION name, instead of the default value
captured = capsys.readouterr()
assert f'"APPLICATION": "{PARTNER_NAME}"' in captured.out
```
5. What did you expect to see?
I expected that setting the ENV_VAR_PARTNER environment variable would result in APPLICATION being overwritten in the requests, but it was not.
6. Can you set logging to DEBUG and collect the logs?
```
import logging
import os
for logger_name in ['snowflake.sqlalchemy', 'snowflake.connector', 'botocore']:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s - %(threadName)s %(filename)s:%(lineno)d - %(funcName)s() - %(levelname)s - %(message)s'))
logger.addHandler(ch)
```
<!--
If you need urgent assistance reach out to support for escalated issue processing https://community.snowflake.com/s/article/How-To-Submit-a-Support-Case-in-Snowflake-Lodge
-->
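For reference, a minimal sketch of how the fixed behaviour is meant to be exercised; the connection parameters are placeholders, not working credentials:
```
import os
import snowflake.connector

# ENV_VAR_PARTNER is "SF_PARTNER". With the fix, the partner name is injected
# into the connect() kwargs before the login request is built, so it is sent
# as APPLICATION in CLIENT_ENVIRONMENT (an explicit application= kwarg still wins).
os.environ["SF_PARTNER"] = "PartnerName"

con = snowflake.connector.connect(
    user="user",          # placeholder credentials
    password="password",
    account="account",
)
print(con.application)    # PartnerName
```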
| snowflakedb/snowflake-connector-python | diff --git a/test/unit/test_connection.py b/test/unit/test_connection.py
index 792529a0..6be22ce0 100644
--- a/test/unit/test_connection.py
+++ b/test/unit/test_connection.py
@@ -4,6 +4,7 @@
#
from __future__ import annotations
+import json
import os
from unittest.mock import patch
@@ -136,12 +137,38 @@ def test_is_still_running():
@pytest.mark.skipolddriver
-def test_partner_env_var():
- with patch.dict(os.environ, {ENV_VAR_PARTNER: "Amanda"}):
- with patch("snowflake.connector.network.SnowflakeRestful.fetch"):
- with snowflake.connector.connect(
- user="user",
- account="account",
- password="password123",
- ) as conn:
- assert conn.application == "Amanda"
+@patch("snowflake.connector.network.SnowflakeRestful._post_request")
+def test_partner_env_var(mockSnowflakeRestfulPostRequest):
+ PARTNER_NAME = "Amanda"
+
+ request_body = {}
+
+ def mock_post_request(url, headers, json_body, **kwargs):
+ nonlocal request_body
+ request_body = json.loads(json_body)
+ return {
+ "success": True,
+ "message": None,
+ "data": {
+ "token": "TOKEN",
+ "masterToken": "MASTER_TOKEN",
+ "idToken": None,
+ "parameters": [{"name": "SERVICE_NAME", "value": "FAKE_SERVICE_NAME"}],
+ },
+ }
+
+ # POST requests mock
+ mockSnowflakeRestfulPostRequest.side_effect = mock_post_request
+
+ with patch.dict(os.environ, {ENV_VAR_PARTNER: PARTNER_NAME}):
+ # connection
+ con = snowflake.connector.connect(
+ user="user",
+ account="account",
+ password="testpassword",
+ database="TESTDB",
+ warehouse="TESTWH",
+ )
+ assert con.application == PARTNER_NAME
+
+ assert request_body["data"]["CLIENT_ENVIRONMENT"]["APPLICATION"] == PARTNER_NAME
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==2.0.12
coverage==7.8.0
cryptography==36.0.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
oscrypto==1.3.0
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pycparser==2.22
pycryptodomex==3.22.0
PyJWT==2.10.1
pyOpenSSL==21.0.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-cov==6.0.0
pytest-xdist==3.6.1
pytz==2025.2
requests==2.32.3
six==1.17.0
-e git+https://github.com/snowflakedb/snowflake-connector-python.git@7bb60da3b48edede5b636d449e5291acdc768534#egg=snowflake_connector_python
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
| name: snowflake-connector-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==2.0.12
- coverage==7.8.0
- cryptography==36.0.2
- execnet==2.1.1
- idna==3.10
- oscrypto==1.3.0
- pycparser==2.22
- pycryptodomex==3.22.0
- pyjwt==2.10.1
- pyopenssl==21.0.0
- pytest-cov==6.0.0
- pytest-xdist==3.6.1
- pytz==2025.2
- requests==2.32.3
- six==1.17.0
- snowflake-connector-python==2.7.6
- urllib3==2.3.0
prefix: /opt/conda/envs/snowflake-connector-python
| [
"test/unit/test_connection.py::test_partner_env_var"
] | [] | [
"test/unit/test_connection.py::test_connect_with_service_name",
"test/unit/test_connection.py::test_is_still_running"
] | [] | Apache License 2.0 | 12,628 | 284 | [
"src/snowflake/connector/connection.py"
] |
burnash__gspread-1030 | 0a7de5d4089c922c936711287bf14a00880fd987 | 2022-04-14 21:54:42 | 3d1092fd26b68fda3b8665c9782abcbe6628f090 | diff --git a/gspread/worksheet.py b/gspread/worksheet.py
index 46cebb6..79a9135 100644
--- a/gspread/worksheet.py
+++ b/gspread/worksheet.py
@@ -2095,3 +2095,30 @@ class Worksheet:
:param int end: The (exclusive) end row to hide
"""
return self._unhide_dimension(start, end, Dimension.rows)
+
+ def _set_hidden_flag(self, hidden):
+ """Send the appropriate request to hide/show the current worksheet"""
+
+ body = {
+ "requests": [
+ {
+ "updateSheetProperties": {
+ "properties": {
+ "sheetId": self.id,
+ "hidden": hidden,
+ },
+ "fields": "hidden",
+ }
+ }
+ ]
+ }
+
+ return self.spreadsheet.batch_update(body)
+
+ def hide(self):
+ """Hides the current worksheet from the UI."""
+ return self._set_hidden_flag(True)
+
+ def show(self):
+ """Show the current worksheet in the UI."""
+ return self._set_hidden_flag(False)
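A brief usage sketch of the two methods added above; the spreadsheet and worksheet names are placeholders:
```
import gspread

gc = gspread.service_account()        # any supported auth flow works
sh = gc.open("My spreadsheet")        # placeholder title
ws = sh.worksheet("Sheet1")

ws.hide()   # sends updateSheetProperties with hidden=True via batch_update
ws.show()   # sets hidden back to False, making the tab visible again
```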
| add method to hide worksheet
## Overview
Add method to hide a worksheet.
## Details
The API provides a way to hide a worksheet: [sheet API v4](https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/sheets#SheetProperties)
expose necessary methods to hide/unhide a worksheet. | burnash/gspread | diff --git a/tests/cassettes/WorksheetTest.test_hide_show_worksheet.json b/tests/cassettes/WorksheetTest.test_hide_show_worksheet.json
new file mode 100644
index 0000000..49021a2
--- /dev/null
+++ b/tests/cassettes/WorksheetTest.test_hide_show_worksheet.json
@@ -0,0 +1,890 @@
+{
+ "version": 1,
+ "interactions": [
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://www.googleapis.com/drive/v3/files?supportsAllDrives=True",
+ "body": "{\"name\": \"Test WorksheetTest test_hide_show_worksheet\", \"mimeType\": \"application/vnd.google-apps.spreadsheet\"}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "110"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Content-Security-Policy": [
+ "frame-ancestors 'self'"
+ ],
+ "Pragma": [
+ "no-cache"
+ ],
+ "Cache-Control": [
+ "no-cache, no-store, max-age=0, must-revalidate"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:51 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin"
+ ],
+ "Expires": [
+ "Mon, 01 Jan 1990 00:00:00 GMT"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "Server": [
+ "GSE"
+ ],
+ "X-XSS-Protection": [
+ "1; mode=block"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "193"
+ ]
+ },
+ "body": {
+ "string": "{\n \"kind\": \"drive#file\",\n \"id\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"name\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"mimeType\": \"application/vnd.google-apps.spreadsheet\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:51 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "3341"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:52 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "3341"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/values/%27Sheet1%27:clear",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "0"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:52 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "107"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"clearedRange\": \"Sheet1!A1:Z1000\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE:batchUpdate",
+ "body": "{\"requests\": [{\"updateSheetProperties\": {\"properties\": {\"sheetId\": 0, \"hidden\": true}, \"fields\": \"hidden\"}}]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "109"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 400,
+ "message": "Bad Request"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:52 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "177"
+ ]
+ },
+ "body": {
+ "string": "{\n \"error\": {\n \"code\": 400,\n \"message\": \"Invalid requests[0].updateSheetProperties: You can't hide all the sheets in a document.\",\n \"status\": \"INVALID_ARGUMENT\"\n }\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE:batchUpdate",
+ "body": "{\"requests\": [{\"addSheet\": {\"properties\": {\"title\": \"you cannot see me\", \"sheetType\": \"GRID\", \"gridProperties\": {\"rowCount\": 2, \"columnCount\": 2}}}}]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "150"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:52 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "387"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"replies\": [\n {\n \"addSheet\": {\n \"properties\": {\n \"sheetId\": 517632659,\n \"title\": \"you cannot see me\",\n \"index\": 1,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 2,\n \"columnCount\": 2\n }\n }\n }\n }\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:53 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "3591"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n },\n {\n \"properties\": {\n \"sheetId\": 517632659,\n \"title\": \"you cannot see me\",\n \"index\": 1,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 2,\n \"columnCount\": 2\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE:batchUpdate",
+ "body": "{\"requests\": [{\"updateSheetProperties\": {\"properties\": {\"sheetId\": 517632659, \"hidden\": true}, \"fields\": \"hidden\"}}]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "117"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:53 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "97"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"replies\": [\n {}\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:53 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "3615"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n },\n {\n \"properties\": {\n \"sheetId\": 517632659,\n \"title\": \"you cannot see me\",\n \"index\": 1,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 2,\n \"columnCount\": 2\n },\n \"hidden\": true\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "POST",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE:batchUpdate",
+ "body": "{\"requests\": [{\"updateSheetProperties\": {\"properties\": {\"sheetId\": 517632659, \"hidden\": false}, \"fields\": \"hidden\"}}]}",
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "118"
+ ],
+ "Content-Type": [
+ "application/json"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:53 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "97"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"replies\": [\n {}\n ]\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "GET",
+ "uri": "https://sheets.googleapis.com/v4/spreadsheets/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?includeGridData=false",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "headers": {
+ "Cache-Control": [
+ "private"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:54 GMT"
+ ],
+ "Transfer-Encoding": [
+ "chunked"
+ ],
+ "Vary": [
+ "Origin",
+ "X-Origin",
+ "Referer"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "application/json; charset=UTF-8"
+ ],
+ "content-length": [
+ "3591"
+ ]
+ },
+ "body": {
+ "string": "{\n \"spreadsheetId\": \"1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE\",\n \"properties\": {\n \"title\": \"Test WorksheetTest test_hide_show_worksheet\",\n \"locale\": \"en_US\",\n \"autoRecalc\": \"ON_CHANGE\",\n \"timeZone\": \"Etc/GMT\",\n \"defaultFormat\": {\n \"backgroundColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n },\n \"padding\": {\n \"top\": 2,\n \"right\": 3,\n \"bottom\": 2,\n \"left\": 3\n },\n \"verticalAlignment\": \"BOTTOM\",\n \"wrapStrategy\": \"OVERFLOW_CELL\",\n \"textFormat\": {\n \"foregroundColor\": {},\n \"fontFamily\": \"arial,sans,sans-serif\",\n \"fontSize\": 10,\n \"bold\": false,\n \"italic\": false,\n \"strikethrough\": false,\n \"underline\": false,\n \"foregroundColorStyle\": {\n \"rgbColor\": {}\n }\n },\n \"backgroundColorStyle\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n \"spreadsheetTheme\": {\n \"primaryFontFamily\": \"Arial\",\n \"themeColors\": [\n {\n \"colorType\": \"TEXT\",\n \"color\": {\n \"rgbColor\": {}\n }\n },\n {\n \"colorType\": \"BACKGROUND\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 1,\n \"blue\": 1\n }\n }\n },\n {\n \"colorType\": \"ACCENT1\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.25882354,\n \"green\": 0.52156866,\n \"blue\": 0.95686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT2\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.91764706,\n \"green\": 0.2627451,\n \"blue\": 0.20784314\n }\n }\n },\n {\n \"colorType\": \"ACCENT3\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.9843137,\n \"green\": 0.7372549,\n \"blue\": 0.015686275\n }\n }\n },\n {\n \"colorType\": \"ACCENT4\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.20392157,\n \"green\": 0.65882355,\n \"blue\": 0.3254902\n }\n }\n },\n {\n \"colorType\": \"ACCENT5\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 1,\n \"green\": 0.42745098,\n \"blue\": 0.003921569\n }\n }\n },\n {\n \"colorType\": \"ACCENT6\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.27450982,\n \"green\": 0.7411765,\n \"blue\": 0.7764706\n }\n }\n },\n {\n \"colorType\": \"LINK\",\n \"color\": {\n \"rgbColor\": {\n \"red\": 0.06666667,\n \"green\": 0.33333334,\n \"blue\": 0.8\n }\n }\n }\n ]\n }\n },\n \"sheets\": [\n {\n \"properties\": {\n \"sheetId\": 0,\n \"title\": \"Sheet1\",\n \"index\": 0,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 1000,\n \"columnCount\": 26\n }\n }\n },\n {\n \"properties\": {\n \"sheetId\": 517632659,\n \"title\": \"you cannot see me\",\n \"index\": 1,\n \"sheetType\": \"GRID\",\n \"gridProperties\": {\n \"rowCount\": 2,\n \"columnCount\": 2\n }\n }\n }\n ],\n \"spreadsheetUrl\": \"https://docs.google.com/spreadsheets/d/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE/edit\"\n}\n"
+ }
+ }
+ },
+ {
+ "request": {
+ "method": "DELETE",
+ "uri": "https://www.googleapis.com/drive/v3/files/1OT2kPgNpvza778NIvvt6yqRFLEt6iRQeBW80AI_fmrE?supportsAllDrives=True",
+ "body": null,
+ "headers": {
+ "User-Agent": [
+ "python-requests/2.27.1"
+ ],
+ "Accept-Encoding": [
+ "gzip, deflate"
+ ],
+ "Accept": [
+ "*/*"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "0"
+ ],
+ "authorization": [
+ "<ACCESS_TOKEN>"
+ ]
+ }
+ },
+ "response": {
+ "status": {
+ "code": 204,
+ "message": "No Content"
+ },
+ "headers": {
+ "Pragma": [
+ "no-cache"
+ ],
+ "Cache-Control": [
+ "no-cache, no-store, max-age=0, must-revalidate"
+ ],
+ "Date": [
+ "Mon, 25 Apr 2022 22:07:54 GMT"
+ ],
+ "Content-Length": [
+ "0"
+ ],
+ "Vary": [
+ "Origin, X-Origin"
+ ],
+ "Expires": [
+ "Mon, 01 Jan 1990 00:00:00 GMT"
+ ],
+ "Server": [
+ "ESF"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "0"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "Alt-Svc": [
+ "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""
+ ],
+ "Content-Type": [
+ "text/html"
+ ]
+ },
+ "body": {
+ "string": ""
+ }
+ }
+ }
+ ]
+}
diff --git a/tests/worksheet_test.py b/tests/worksheet_test.py
index e25403f..e56bf20 100644
--- a/tests/worksheet_test.py
+++ b/tests/worksheet_test.py
@@ -6,7 +6,7 @@ import pytest
import gspread
import gspread.utils as utils
-from gspread.exceptions import GSpreadException
+from gspread.exceptions import APIError, GSpreadException
from .conftest import I18N_STR, GspreadTest
@@ -220,7 +220,7 @@ class WorksheetTest(GspreadTest):
new_rows = self.sheet.row_count + add_num
def get_grid_props():
- sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
+ sheets = self.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)["properties"]["gridProperties"]
@@ -308,7 +308,7 @@ class WorksheetTest(GspreadTest):
freeze_rows = 2
def get_grid_props():
- sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
+ sheets = self.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)["properties"]["gridProperties"]
@@ -335,7 +335,7 @@ class WorksheetTest(GspreadTest):
@pytest.mark.vcr()
def test_basic_filters(self):
def get_sheet():
- sheets = self.sheet.spreadsheet.fetch_sheet_metadata()["sheets"]
+ sheets = self.spreadsheet.fetch_sheet_metadata()["sheets"]
return utils.finditem(
lambda x: x["properties"]["sheetId"] == self.sheet.id, sheets
)
@@ -976,3 +976,36 @@ class WorksheetTest(GspreadTest):
w.hide_rows(0, 2)
w.unhide_rows(0, 2)
+
+ @pytest.mark.vcr()
+ def test_hide_show_worksheet(self):
+ """We can't retrieve this property from the API
+ see issue: https://issuetracker.google.com/issues/229298342
+
+ We can only send the request and make sure it works.
+ This is a trivial method, using recorded cassettes it will never fail.
+ But next time we refresh the cassette it will make the real request."""
+
+ # you cannot hide all worksheet in a document
+ with pytest.raises(APIError):
+ self.sheet.hide()
+
+ new_sheet = self.spreadsheet.add_worksheet("you cannot see me", 2, 2)
+
+ # as describe in https://issuetracker.google.com/issues/229298342
+ # the response does not include some default values.
+ # if missing => value is False
+ res = self.spreadsheet.fetch_sheet_metadata()
+ before_hide = res["sheets"][1]["properties"].get("hidden", False)
+ self.assertFalse(before_hide)
+
+ new_sheet.hide()
+
+ res = self.spreadsheet.fetch_sheet_metadata()
+ after_hide = res["sheets"][1]["properties"].get("hidden", False)
+ self.assertTrue(after_hide)
+
+ new_sheet.show()
+ res = self.spreadsheet.fetch_sheet_metadata()
+ before_hide = res["sheets"][1]["properties"].get("hidden", False)
+ self.assertFalse(before_hide)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 5.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-vcr"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
google-auth==2.38.0
google-auth-oauthlib==1.2.1
-e git+https://github.com/burnash/gspread.git@0a7de5d4089c922c936711287bf14a00880fd987#egg=gspread
idna==3.10
iniconfig==2.1.0
multidict==6.2.0
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
propcache==0.3.1
pyasn1==0.6.1
pyasn1_modules==0.4.2
pytest==8.3.5
pytest-vcr==1.0.2
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
tomli==2.2.1
typing_extensions==4.13.0
urllib3==1.26.20
vcrpy==7.0.0
wrapt==1.17.2
yarl==1.18.3
| name: gspread
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- google-auth==2.38.0
- google-auth-oauthlib==1.2.1
- idna==3.10
- iniconfig==2.1.0
- multidict==6.2.0
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- propcache==0.3.1
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pytest==8.3.5
- pytest-vcr==1.0.2
- pyyaml==6.0.2
- requests==2.32.3
- requests-oauthlib==2.0.0
- rsa==4.9
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==1.26.20
- vcrpy==7.0.0
- wrapt==1.17.2
- yarl==1.18.3
prefix: /opt/conda/envs/gspread
| [
"tests/worksheet_test.py::WorksheetTest::test_hide_show_worksheet"
] | [] | [
"tests/worksheet_test.py::WorksheetTest::test_acell",
"tests/worksheet_test.py::WorksheetTest::test_append_row",
"tests/worksheet_test.py::WorksheetTest::test_append_row_with_empty_value",
"tests/worksheet_test.py::WorksheetTest::test_append_row_with_empty_value_and_table_range",
"tests/worksheet_test.py::WorksheetTest::test_basic_filters",
"tests/worksheet_test.py::WorksheetTest::test_batch_clear",
"tests/worksheet_test.py::WorksheetTest::test_batch_get",
"tests/worksheet_test.py::WorksheetTest::test_batch_update",
"tests/worksheet_test.py::WorksheetTest::test_cell",
"tests/worksheet_test.py::WorksheetTest::test_clear",
"tests/worksheet_test.py::WorksheetTest::test_delete_row",
"tests/worksheet_test.py::WorksheetTest::test_find",
"tests/worksheet_test.py::WorksheetTest::test_findall",
"tests/worksheet_test.py::WorksheetTest::test_format",
"tests/worksheet_test.py::WorksheetTest::test_freeze",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_different_header",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_duplicate_keys",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_expected_headers",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_numericise_unformatted",
"tests/worksheet_test.py::WorksheetTest::test_get_all_records_value_render_options",
"tests/worksheet_test.py::WorksheetTest::test_get_all_values",
"tests/worksheet_test.py::WorksheetTest::test_get_all_values_title_is_a1_notation",
"tests/worksheet_test.py::WorksheetTest::test_group_columns",
"tests/worksheet_test.py::WorksheetTest::test_group_rows",
"tests/worksheet_test.py::WorksheetTest::test_hide_columns_rows",
"tests/worksheet_test.py::WorksheetTest::test_insert_row",
"tests/worksheet_test.py::WorksheetTest::test_range",
"tests/worksheet_test.py::WorksheetTest::test_range_get_all_values",
"tests/worksheet_test.py::WorksheetTest::test_range_reversed",
"tests/worksheet_test.py::WorksheetTest::test_range_unbounded",
"tests/worksheet_test.py::WorksheetTest::test_reorder_worksheets",
"tests/worksheet_test.py::WorksheetTest::test_resize",
"tests/worksheet_test.py::WorksheetTest::test_sort",
"tests/worksheet_test.py::WorksheetTest::test_update_acell",
"tests/worksheet_test.py::WorksheetTest::test_update_and_get",
"tests/worksheet_test.py::WorksheetTest::test_update_cell",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_multiline",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_objects",
"tests/worksheet_test.py::WorksheetTest::test_update_cell_unicode",
"tests/worksheet_test.py::WorksheetTest::test_update_cells",
"tests/worksheet_test.py::WorksheetTest::test_update_cells_noncontiguous",
"tests/worksheet_test.py::WorksheetTest::test_update_cells_unicode",
"tests/worksheet_test.py::WorksheetTest::test_worksheet_notes",
"tests/worksheet_test.py::WorksheetTest::test_worksheet_update_index"
] | [] | MIT License | 12,631 | 269 | [
"gspread/worksheet.py"
] |
|
radiasoft__pykern-160 | 814541baf45bb9221a6c03aa766bd28fbd523ec5 | 2022-04-14 23:01:09 | ae582852dfe38aa625e572c3c4917e29c81d8e7d | gurhar1133: What do you think about the test here? https://github.com/radiasoft/pykern/pull/160/commits/b7c7a5b38ce212c55aed1b2f00ceeaf6f6de3789 @robnagler Maybe something a bit more clever would be better?
gurhar1133: Made the changes
had to invert test ;) | diff --git a/pykern/pkcli/__init__.py b/pykern/pkcli/__init__.py
index cb8bd7e..27cbf90 100644
--- a/pykern/pkcli/__init__.py
+++ b/pykern/pkcli/__init__.py
@@ -66,6 +66,42 @@ def command_error(fmt, *args, **kwargs):
raise CommandError(fmt.format(*args, **kwargs))
+class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
+ argparse.RawDescriptionHelpFormatter):
+ def _expand_help(self, action):
+ return super()._expand_help(action).split('\n')[0]
+
+class CustomParser(argparse.ArgumentParser):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.program = kwargs.copy()
+ self.options = []
+
+ def format_help(self):
+ f = argh.PARSER_FORMATTER(prog=self.prog)
+ if not self.description:
+ f = CustomFormatter(prog=self.prog)
+ f.add_usage(
+ self.usage,
+ self._actions,
+ self._mutually_exclusive_groups
+ )
+ f.add_text(self.description)
+ for a in self._action_groups:
+ f.start_section(a.title)
+ f.add_text(a.description)
+ f.add_arguments(a._group_actions)
+ f.end_section()
+ f.add_text(self.epilog)
+ if not self.description:
+ return f.format_help().replace('positional arguments', 'commands')
+ return f.format_help()
+
+ def print_help(self):
+ print(self.format_help())
+
+
def main(root_pkg, argv=None):
"""Invokes module functions in :mod:`pykern.pkcli`
@@ -90,8 +126,7 @@ def main(root_pkg, argv=None):
if not cli:
return 1
prog = prog + ' ' + module_name
- parser = argparse.ArgumentParser(
- prog=prog, formatter_class=argh.PARSER_FORMATTER)
+ parser = CustomParser(prog)
cmds = _commands(cli)
dc = _default_command(cmds, argv)
if dc:
| pkcli docstring printing too greedy
When using the `pkcli` interaction with the middl project, I'm finding that the help printing seems a bit too greedy, printing the entire docstring instead of just the function summary. The command is below:
```bash
(middl) [joshec@roentgen middl]$ middlsoft train -h
usage: middlsoft train [-h] {regressor,vrae} ...
positional arguments:
{regressor,vrae}
regressor Train and run regressor to map latent space back to data
Args: configuration (str): path to configuration file in
model directory no_cuda (bool): whether or not to use
CUDA. Defaults to False. cuda_device (int): CUDA device
index. Defaults to -1 (all). do_write (bool): write
regressor output to file. Defaults to False. use_sklearn
(bool): use scikit-learn MLPRegressor vs pytorch. Defaults
to False. datadir (str): alternative directory to find
data in. Defaults to None. profile (bool): perform simple
profiling of pytorch model. Defaults to False. Returns:
None
vrae
optional arguments:
-h, --help show this help message and exit
```
This printing occurs on the disvae_losses branch of the middl project: [middl/middlsoft/pkcli/train.py](https://github.com/radiasoft/middl/blob/d8ebd67e8dff4ff563fc7be78220f785c1fb5ad3/middlsoft/pkcli/train.py#L253-L274) | radiasoft/pykern | diff --git a/tests/pkcli_data/p1/pkcli/conf1.py b/tests/pkcli_data/p1/pkcli/conf1.py
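
Editor's note: the fix in the patch above works by overriding argparse's private `_expand_help` hook so that only the first (summary) line of each command's docstring-derived help is displayed. A standalone sketch of that idea follows; the class name, demo parser, and help string are illustrative only (not taken from pykern or middl), and relying on `_expand_help` is a CPython implementation detail rather than documented API.

```python
# Editorial sketch only: trim each subcommand's help text to its first line,
# mirroring the CustomFormatter._expand_help override in the patch above.
import argparse


class FirstLineHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
                             argparse.RawDescriptionHelpFormatter):
    def _expand_help(self, action):
        # _expand_help is a private argparse hook returning the fully expanded
        # help string; keep only its summary (first) line.
        return super()._expand_help(action).split('\n')[0]


parser = argparse.ArgumentParser(prog='demo', formatter_class=FirstLineHelpFormatter)
subparsers = parser.add_subparsers()
subparsers.add_parser(
    'regressor',
    help='Train and run regressor to map latent space back to data\n\nArgs:\n    configuration (str): ...',
)
parser.print_help()  # the {regressor} entry now shows only the summary line
```

The patch additionally routes help rendering through a `CustomParser` whose plain help page relabels "positional arguments" as "commands", which is what the adjusted regex in the test patch below checks for.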
index 3feccce..057bb50 100644
--- a/tests/pkcli_data/p1/pkcli/conf1.py
+++ b/tests/pkcli_data/p1/pkcli/conf1.py
@@ -5,11 +5,21 @@ last_cmd = None
from pykern.pkdebug import pkdp
def cmd1(arg1):
+ """Subject line for cmd1
+
+ Args:
+ arg1
+ """
global last_cmd
last_cmd = cmd1
return
def cmd2():
+ """Subject line for cmd2
+
+ Args:
+ -
+ """
global last_cmd
last_cmd = cmd2
return
diff --git a/tests/pkcli_test.py b/tests/pkcli_test.py
index fcaeb56..09d6087 100644
--- a/tests/pkcli_test.py
+++ b/tests/pkcli_test.py
@@ -42,7 +42,7 @@ def test_main2(capsys):
_dev(rp, [], None, all_modules, capsys)
_dev(rp, ['--help'], None, all_modules, capsys)
_dev(rp, ['conf1'], SystemExit, r'cmd1,cmd2.*too few', capsys)
- _dev(rp, ['conf1', '-h'], SystemExit, r'\{cmd1,cmd2\}.*positional arguments', capsys)
+ _dev(rp, ['conf1', '-h'], SystemExit, r'\{cmd1,cmd2\}.*commands', capsys)
if six.PY2:
_dev(rp, ['not_found'], None, r'no module', capsys)
else:
@@ -91,8 +91,9 @@ def _dev(root_pkg, argv, exc, expect, capsys):
out, err = capsys.readouterr()
if not err:
err = out
- assert re.search(expect, err, flags=re.IGNORECASE+re.DOTALL) is not None, \
- 'Looking for {} in err={}'.format(expect, err)
+ assert re.search('Args.*arg1', err, flags=re.DOTALL) is None, \
+ 'failure to ignore arguments and only print subject. out: {}'.format(err)
+ pkunit.pkre(expect, err)
def _main(root_pkg, argv):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | stable | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install -U pip setuptools"
],
"python": "3.7",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argh==0.27.2
attrs @ file:///croot/attrs_1668696182826/work
Babel==2.14.0
bleach==6.0.0
cachetools==5.5.2
certifi @ file:///croot/certifi_1671487769961/work/certifi
cffi==1.15.1
chardet==5.2.0
charset-normalizer==3.4.1
colorama==0.4.6
cryptography==44.0.2
distlib==0.3.9
docutils==0.19
filelock==3.12.2
flit_core @ file:///opt/conda/conda-bld/flit-core_1644941570762/work/source/flit_core
future==1.0.0
github3.py==4.0.1
idna==3.10
imagesize==1.4.1
importlib-metadata==6.7.0
importlib-resources==5.12.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
jaraco.classes==3.2.3
jeepney==0.9.0
Jinja2==3.1.6
keyring==24.1.1
markdown-it-py==2.2.0
MarkupSafe==2.1.5
mdurl==0.1.2
more-itertools==9.1.0
packaging==24.0
path==16.6.0
path.py==12.5.0
pkginfo==1.10.0
platformdirs==4.0.0
pluggy==1.2.0
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==9.0.0
pycparser==2.21
Pygments==2.17.2
PyJWT==2.8.0
-e git+https://github.com/radiasoft/pykern.git@814541baf45bb9221a6c03aa766bd28fbd523ec5#egg=pykern
pyproject-api==1.5.3
pytest==7.1.2
python-dateutil==2.9.0.post0
pytz==2025.2
readme-renderer==37.3
requests==2.31.0
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==13.8.1
ruamel.yaml==0.18.10
ruamel.yaml.clib==0.2.8
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tox==4.8.0
twine==4.0.2
typing_extensions==4.7.1
uritemplate==4.1.1
urllib3==2.0.7
virtualenv==20.26.6
webencodings==0.5.1
zipp @ file:///croot/zipp_1672387121353/work
| name: pykern
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=22.1.0=py37h06a4308_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- flit-core=3.6.0=pyhd3eb1b0_0
- importlib_metadata=4.11.3=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- py=1.11.0=pyhd3eb1b0_0
- pytest=7.1.2=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py37h06a4308_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zipp=3.11.0=py37h06a4308_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argh==0.27.2
- babel==2.14.0
- bleach==6.0.0
- cachetools==5.5.2
- cffi==1.15.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- colorama==0.4.6
- cryptography==44.0.2
- distlib==0.3.9
- docutils==0.19
- filelock==3.12.2
- future==1.0.0
- github3-py==4.0.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==6.7.0
- importlib-resources==5.12.0
- jaraco-classes==3.2.3
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==24.1.1
- markdown-it-py==2.2.0
- markupsafe==2.1.5
- mdurl==0.1.2
- more-itertools==9.1.0
- packaging==24.0
- path==16.6.0
- path-py==12.5.0
- pip==24.0
- pkginfo==1.10.0
- platformdirs==4.0.0
- pluggy==1.2.0
- psutil==7.0.0
- py-cpuinfo==9.0.0
- pycparser==2.21
- pygments==2.17.2
- pyjwt==2.8.0
- pyproject-api==1.5.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- readme-renderer==37.3
- requests==2.31.0
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==13.8.1
- ruamel-yaml==0.18.10
- ruamel-yaml-clib==0.2.8
- secretstorage==3.3.3
- setuptools==56.2.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tox==4.8.0
- twine==4.0.2
- typing-extensions==4.7.1
- uritemplate==4.1.1
- urllib3==2.0.7
- virtualenv==20.26.6
- webencodings==0.5.1
prefix: /opt/conda/envs/pykern
| [
"tests/pkcli_test.py::test_main2"
] | [] | [
"tests/pkcli_test.py::test_command_error",
"tests/pkcli_test.py::test_main1",
"tests/pkcli_test.py::test_main3"
] | [] | Apache License 2.0 | 12,633 | 500 | [
"pykern/pkcli/__init__.py"
] |
marcosschroh__dataclasses-avroschema-172 | d88afb8df16039cf86fc0598e2d4c0431613987a | 2022-04-15 14:27:12 | d88afb8df16039cf86fc0598e2d4c0431613987a | codecov[bot]: # [Codecov](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh) Report
> Merging [#172](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh) (a39e8ad) into [master](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/commit/d88afb8df16039cf86fc0598e2d4c0431613987a?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh) (d88afb8) will **not change** coverage.
> The diff coverage is `100.00%`.
```diff
@@ Coverage Diff @@
## master #172 +/- ##
=======================================
Coverage 99.65% 99.65%
=======================================
Files 10 10
Lines 869 869
Branches 151 151
=======================================
Hits 866 866
Misses 2 2
Partials 1 1
```
| [Impacted Files](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh) | Coverage Δ | |
|---|---|---|
| [dataclasses\_avroschema/fields.py](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh#diff-ZGF0YWNsYXNzZXNfYXZyb3NjaGVtYS9maWVsZHMucHk=) | `99.56% <100.00%> (ø)` | |
| [dataclasses\_avroschema/utils.py](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh#diff-ZGF0YWNsYXNzZXNfYXZyb3NjaGVtYS91dGlscy5weQ==) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh). Last update [d88afb8...a39e8ad](https://codecov.io/gh/marcosschroh/dataclasses-avroschema/pull/172?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=Marcos+Schroh).
| diff --git a/dataclasses_avroschema/fields.py b/dataclasses_avroschema/fields.py
index bb973af..8ec1397 100644
--- a/dataclasses_avroschema/fields.py
+++ b/dataclasses_avroschema/fields.py
@@ -685,8 +685,16 @@ class UUIDField(LogicalTypeField):
@dataclasses.dataclass
class RecordField(BaseField):
def get_avro_type(self) -> typing.Union[typing.List, typing.Dict]:
- alias = self.parent.metadata.get_alias(self.name) or self.model_metadata.get_alias(self.name) # type: ignore
- name = alias or self.type.__name__
+ meta = getattr(self.type, "Meta", None)
+ metadata = utils.SchemaMetadata.create(meta)
+
+ alias = self.parent.metadata.get_alias_nested_items(self.name) or metadata.get_alias_nested_items(self.name) # type: ignore # noqa E501
+
+ # The priority for the schema name
+ # 1. Check if the schema_name is present in the Meta class of own model
+ # 2. Check if exists an alias_nested_items in parent class or Meta class of own model
+ # 3. Use the default class Name (self.type.__name__)
+ name = metadata.schema_name or alias or self.type.__name__
if not self.exist_type():
user_defined_type = utils.UserDefinedType(name=name, type=self.type)
@@ -697,9 +705,6 @@ class RecordField(BaseField):
record_type = self.type.avro_schema_to_python(root=self.parent)
record_type["name"] = name
else:
- meta = getattr(self.type, "Meta", None)
- metadata = utils.SchemaMetadata.create(meta)
-
if metadata.namespace is None:
raise NameSpaceRequiredException(field_type=self.type, field_name=self.name)
record_type = f"{metadata.namespace}.{name}"
diff --git a/dataclasses_avroschema/utils.py b/dataclasses_avroschema/utils.py
index 606cb63..0d3722f 100644
--- a/dataclasses_avroschema/utils.py
+++ b/dataclasses_avroschema/utils.py
@@ -90,7 +90,7 @@ class SchemaMetadata:
alias_nested_items=getattr(klass, "alias_nested_items", {}),
)
- def get_alias(self, name: str) -> typing.Optional[str]:
+ def get_alias_nested_items(self, name: str) -> typing.Optional[str]:
return self.alias_nested_items.get(name)
| Nested metadata not respected
**Describe the bug**
If I override a `schema_name` attribute for a class that's used as a field, that schema_name isn't respected.
**To Reproduce**
```python
from dataclasses_avroschema import AvroModel
from dataclasses import dataclass
@dataclass
class MyClass(AvroModel):
field_1: str
class Meta:
schema_name = "custom_class" # <-- this is not respected
class MySecondClass(AvroModel):
field_2: MyClass
class Meta:
schema_name = "custom_name"
MySecondClass.avro_schema_to_python()
```
This outputs
```python
{'type': 'record',
'name': 'custom_name',
'fields': [{'name': 'field_2',
'type': {'type': 'record',
'name': 'MyClass', # <-- this line is wrong
'fields': [{'name': 'field_1', 'type': 'string'}],}}],
}
```
**Expected behavior**
I would expect
```python
{'type': 'record',
'name': 'custom_name',
'fields': [{'name': 'field_2',
'type': {'type': 'record',
'name': 'custom_class', # This is the important line
'fields': [{'name': 'field_1', 'type': 'string'}],}}],
}
```
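
Editor's note: the accompanying patch resolves the nested record's name with the priority `Meta.schema_name` → `alias_nested_items` (from the parent or the nested model's own Meta) → the class name. With that fix applied, the expectation above can be checked directly; this sketch reuses the `MyClass`/`MySecondClass` definitions from the reproduction and mirrors the assertion added in the test patch below.

```python
# Editorial sketch: with the fix, the nested Meta.schema_name wins over the
# default class name when the schema is rendered from the parent model.
schema = MySecondClass.avro_schema_to_python()

assert schema["name"] == "custom_name"
# Previously this resolved to "MyClass"; the nested Meta.schema_name is now respected.
assert schema["fields"][0]["type"]["name"] == "custom_class"
```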
| marcosschroh/dataclasses-avroschema | diff --git a/tests/schemas/test_schema.py b/tests/schemas/test_schema.py
index b9252ee..d1f6c20 100644
--- a/tests/schemas/test_schema.py
+++ b/tests/schemas/test_schema.py
@@ -151,6 +151,24 @@ def test_get_fields():
assert Parent.fake()
+def test_schema_name_from_relationship():
+ @dataclass
+ class MyClass(AvroModel):
+ field_1: str
+
+ class Meta:
+ schema_name = "custom_class"
+
+ class MySecondClass(AvroModel):
+ field_2: MyClass
+
+ class Meta:
+ schema_name = "custom_name"
+
+ schema = MySecondClass.avro_schema_to_python()
+ assert schema["fields"][0]["type"]["name"] == "custom_class"
+
+
def test_validate():
@dataclass
class User(AvroModel):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.29 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[pydantic]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiohttp-cors==0.8.0
aiokafka==0.12.0
aiosignal==1.3.2
annotated-types==0.7.0
async-timeout==5.0.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
backrefs==5.8
black==25.1.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
codecov==2.1.13
colorama==0.4.6
colorlog==6.9.0
coverage==7.8.0
croniter==2.0.7
cryptography==44.0.2
dacite==1.9.2
-e git+https://github.com/marcosschroh/dataclasses-avroschema.git@d88afb8df16039cf86fc0598e2d4c0431613987a#egg=dataclasses_avroschema
docutils==0.21.2
exceptiongroup==1.2.2
Faker==37.1.0
fastavro==1.10.0
faust-streaming==0.11.3
flake8==7.2.0
frozenlist==1.5.0
ghp-import==2.1.0
id==1.5.0
idna==3.10
importlib_metadata==8.6.1
inflect==7.5.0
iniconfig==2.1.0
intervaltree==3.1.0
isort==6.0.1
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
Markdown==3.7
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
mergedeep==1.3.4
mkdocs==1.6.1
mkdocs-get-deps==0.2.0
mkdocs-material==9.6.10
mkdocs-material-extensions==1.3.1
mode-streaming==0.4.1
more-itertools==10.6.0
multidict==6.2.0
mypy==1.15.0
mypy-extensions==1.0.0
nh3==0.2.21
opentracing==2.4.0
packaging==24.2
paginate==0.5.7
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
propcache==0.3.1
pycodestyle==2.13.0
pycparser==2.22
pydantic==2.11.1
pydantic_core==2.33.0
pyflakes==3.3.1
Pygments==2.19.1
pymdown-extensions==10.14.3
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.2
pyyaml_env_tag==0.1
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
six==1.17.0
sortedcontainers==2.4.0
stringcase==1.2.0
terminaltables==3.1.10
tomli==2.2.1
twine==6.1.0
typeguard==4.4.2
types-pytz==2025.2.0.20250326
typing-inspection==0.4.0
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
venusian==3.1.0
watchdog==6.0.0
yarl==1.18.3
zipp==3.21.0
| name: dataclasses-avroschema
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiohappyeyeballs==2.6.1
- aiohttp==3.11.14
- aiohttp-cors==0.8.0
- aiokafka==0.12.0
- aiosignal==1.3.2
- annotated-types==0.7.0
- async-timeout==5.0.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- backrefs==5.8
- black==25.1.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- codecov==2.1.13
- colorama==0.4.6
- colorlog==6.9.0
- coverage==7.8.0
- croniter==2.0.7
- cryptography==44.0.2
- dacite==1.9.2
- dataclasses-avroschema==0.29.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- faker==37.1.0
- fastavro==1.10.0
- faust-streaming==0.11.3
- flake8==7.2.0
- frozenlist==1.5.0
- ghp-import==2.1.0
- id==1.5.0
- idna==3.10
- importlib-metadata==8.6.1
- inflect==7.5.0
- iniconfig==2.1.0
- intervaltree==3.1.0
- isort==6.0.1
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown==3.7
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- mergedeep==1.3.4
- mkdocs==1.6.1
- mkdocs-get-deps==0.2.0
- mkdocs-material==9.6.10
- mkdocs-material-extensions==1.3.1
- mode-streaming==0.4.1
- more-itertools==10.6.0
- multidict==6.2.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nh3==0.2.21
- opentracing==2.4.0
- packaging==24.2
- paginate==0.5.7
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- propcache==0.3.1
- pycodestyle==2.13.0
- pycparser==2.22
- pydantic==2.11.1
- pydantic-core==2.33.0
- pyflakes==3.3.1
- pygments==2.19.1
- pymdown-extensions==10.14.3
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.2
- pyyaml-env-tag==0.1
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- six==1.17.0
- sortedcontainers==2.4.0
- stringcase==1.2.0
- terminaltables==3.1.10
- tomli==2.2.1
- twine==6.1.0
- typeguard==4.4.2
- types-pytz==2025.2.0.20250326
- typing-extensions==4.13.0
- typing-inspection==0.4.0
- tzdata==2025.2
- urllib3==2.3.0
- venusian==3.1.0
- watchdog==6.0.0
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/dataclasses-avroschema
| [
"tests/schemas/test_schema.py::test_schema_name_from_relationship"
] | [] | [
"tests/schemas/test_schema.py::test_total_schema_fields_from_class",
"tests/schemas/test_schema.py::test_total_schema_fields_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_field_metadata",
"tests/schemas/test_schema.py::test_schema_render_from_class",
"tests/schemas/test_schema.py::test_schema_render_from_instance",
"tests/schemas/test_schema.py::test_schema_render_from_class_with_doc",
"tests/schemas/test_schema.py::test_schema_render_from_instance_with_doc",
"tests/schemas/test_schema.py::test_schema_documentation",
"tests/schemas/test_schema.py::test_schema_cached",
"tests/schemas/test_schema.py::test_extra_avro_attributes",
"tests/schemas/test_schema.py::test_class_empty_metaclass",
"tests/schemas/test_schema.py::test_invalid_schema_type",
"tests/schemas/test_schema.py::test_not_implementd_methods",
"tests/schemas/test_schema.py::test_namespace_required",
"tests/schemas/test_schema.py::test_inherit_dataclass_missing_docs",
"tests/schemas/test_schema.py::test_get_fields",
"tests/schemas/test_schema.py::test_validate",
"tests/schemas/test_schema.py::test_get_enum_type_map",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_unions",
"tests/schemas/test_schema.py::test_get_enum_type_map_with_sub_record",
"tests/schemas/test_schema.py::test_deserialize_complex_types",
"tests/schemas/test_schema.py::test_deserialize_complex_types_invalid_enum_instance",
"tests/schemas/test_schema.py::test_parse_obj"
] | [] | MIT License | 12,637 | 574 | [
"dataclasses_avroschema/fields.py",
"dataclasses_avroschema/utils.py"
] |
David-Wobrock__sqlvalidator-43 | 2827c2a16d0dbfeba6b90fb2e8ee2bdacc6d0286 | 2022-04-15 16:25:05 | 2827c2a16d0dbfeba6b90fb2e8ee2bdacc6d0286 | diff --git a/sqlvalidator/grammar/tokeniser.py b/sqlvalidator/grammar/tokeniser.py
index df1e423..62f07fa 100644
--- a/sqlvalidator/grammar/tokeniser.py
+++ b/sqlvalidator/grammar/tokeniser.py
@@ -47,10 +47,12 @@ def get_tokens_until_one_of(tokens, stop_words, first_token=None, keep=None):
next_token = next(tokens, None)
count_parenthesis = 0 if first_token != "(" else 1
count_square_brackets = 0 if first_token != "[" else 1
+ count_case_expr = 0 if first_token != "case" else 1
while next_token is not None and not (
lower(next_token) in stop_words
and count_parenthesis <= 0
and count_square_brackets <= 0
+ and count_case_expr <= 0
and (
not argument_tokens
or (lower(argument_tokens[-1]), lower(next_token)) not in keep
@@ -65,6 +67,10 @@ def get_tokens_until_one_of(tokens, stop_words, first_token=None, keep=None):
count_square_brackets += 1
elif next_token == "]":
count_square_brackets -= 1
+ elif lower(next_token) == "case":
+ count_case_expr += 1
+ elif lower(next_token) == "end":
+ count_case_expr -= 1
next_token = next(tokens, None)
return argument_tokens, next_token
| AssertionError is raised when using a CASE statement in a CASE statement
Hi David!
Thank you for `sqlvalidator`! :)
The following query seems correct:
```sql
SELECT
CASE
WHEN CASE
WHEN code = 1 THEN 'VALID'
WHEN code = 2 THEN 'INVALID'
END
= 'VALID' THEN 'OK'
ELSE
'KO'
END
FROM table
```
However, `sqlvalidator` fails to validate/format it: an `AssertionError` is raised.
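For reference, a minimal Python reproduction sketch. It assumes the package-level `format_sql` helper (the same entry point the project's integration tests call); the `try/except` is only there to surface the failure:
```python
import sqlvalidator

# Same nested CASE expression as above, condensed into a single string.
sql = (
    "SELECT CASE "
    "WHEN CASE WHEN code = 1 THEN 'VALID' WHEN code = 2 THEN 'INVALID' END = 'VALID' "
    "THEN 'OK' ELSE 'KO' END "
    "FROM table"
)

try:
    print(sqlvalidator.format_sql(sql))
except AssertionError as exc:
    print("formatting failed with AssertionError:", exc)
```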
| David-Wobrock/sqlvalidator | diff --git a/tests/integration/test_formatting.py b/tests/integration/test_formatting.py
index 169b28c..80e6199 100644
--- a/tests/integration/test_formatting.py
+++ b/tests/integration/test_formatting.py
@@ -2182,3 +2182,26 @@ SELECT * REPLACE ("x" AS col)
FROM t
"""
assert format_sql(sql) == expected.strip()
+
+
+def test_nesting_case_expr():
+ sql = """SELECT CASE
+ WHEN CASE
+ WHEN code = 1 THEN 'VALID'
+ WHEN code = 2 THEN 'INVALID' END
+ = 'VALID' THEN 'OK' ELSE 'KO' END
+FROM table
+"""
+ expected = """
+SELECT CASE
+ WHEN
+ CASE
+ WHEN code = 1 THEN 'VALID'
+ WHEN code = 2 THEN 'INVALID'
+ END = 'VALID'
+ THEN 'OK'
+ ELSE 'KO'
+END
+FROM table
+"""
+ assert format_sql(sql) == expected.strip()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy==1.0.0.dev0
py==1.11.0
pytest==6.2.4
-e git+https://github.com/David-Wobrock/sqlvalidator.git@2827c2a16d0dbfeba6b90fb2e8ee2bdacc6d0286#egg=sqlvalidator
toml==0.10.2
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: sqlvalidator
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- pluggy==1.0.0.dev0
- py==1.11.0
- pytest==6.2.4
- toml==0.10.2
prefix: /opt/conda/envs/sqlvalidator
| [
"tests/integration/test_formatting.py::test_nesting_case_expr"
] | [] | [
"tests/integration/test_formatting.py::test_format_select_star",
"tests/integration/test_formatting.py::test_upper_function_name",
"tests/integration/test_formatting.py::test_nested_function_name",
"tests/integration/test_formatting.py::test_extract_function",
"tests/integration/test_formatting.py::test_no_from_statement",
"tests/integration/test_formatting.py::test_simple_column",
"tests/integration/test_formatting.py::test_conditional_column",
"tests/integration/test_formatting.py::test_conditional_parenthesis_columns",
"tests/integration/test_formatting.py::test_simple_aliased_column",
"tests/integration/test_formatting.py::test_simple_aliased_as_column",
"tests/integration/test_formatting.py::test_select_except_one_line",
"tests/integration/test_formatting.py::test_select_except_multi_line",
"tests/integration/test_formatting.py::test_multiple_columns",
"tests/integration/test_formatting.py::test_parenthesis",
"tests/integration/test_formatting.py::test_basic_arithmetic",
"tests/integration/test_formatting.py::test_chained_arithmetic",
"tests/integration/test_formatting.py::test_nested_queries",
"tests/integration/test_formatting.py::test_nested_queries_multiple_columns",
"tests/integration/test_formatting.py::test_two_nested_queries",
"tests/integration/test_formatting.py::test_assert_no_semi_colon",
"tests/integration/test_formatting.py::test_quoted_from",
"tests/integration/test_formatting.py::test_where_clause_boolean_column",
"tests/integration/test_formatting.py::test_where_clause_boolean_equal",
"tests/integration/test_formatting.py::test_where_clause_boolean_is",
"tests/integration/test_formatting.py::test_where_clause_str",
"tests/integration/test_formatting.py::test_where_clause_columns",
"tests/integration/test_formatting.py::test_where_clause_parenthesis_expression",
"tests/integration/test_formatting.py::test_where_clause_boolean",
"tests/integration/test_formatting.py::test_where_clause_parenthesis_boolean",
"tests/integration/test_formatting.py::test_where_clause_multiple_parenthesis_booleans",
"tests/integration/test_formatting.py::test_where_clause_multiple_booleans",
"tests/integration/test_formatting.py::test_boolean_conditions_select_where",
"tests/integration/test_formatting.py::test_parenthesis_boolean_conditions_select_where",
"tests/integration/test_formatting.py::test_distinct_on_one_field",
"tests/integration/test_formatting.py::test_distinct_on_multiple_fields",
"tests/integration/test_formatting.py::test_empty_group_by",
"tests/integration/test_formatting.py::test_group_by",
"tests/integration/test_formatting.py::test_group_by_parenthesis",
"tests/integration/test_formatting.py::test_group_by_multiple_elements",
"tests/integration/test_formatting.py::test_group_by_multiple_elements_parenthesis",
"tests/integration/test_formatting.py::test_where_and_group_by",
"tests/integration/test_formatting.py::test_group_by_parenthesis_rollup",
"tests/integration/test_formatting.py::test_group_by_multiple_elements_parenthesis_rollup",
"tests/integration/test_formatting.py::test_where_and_having",
"tests/integration/test_formatting.py::test_order_by",
"tests/integration/test_formatting.py::test_order_by_mutliple_fields",
"tests/integration/test_formatting.py::test_order_by_mutliple_fields_order",
"tests/integration/test_formatting.py::test_limit",
"tests/integration/test_formatting.py::test_limit_all",
"tests/integration/test_formatting.py::test_offset",
"tests/integration/test_formatting.py::test_subquery_where",
"tests/integration/test_formatting.py::test_aliased_subquery",
"tests/integration/test_formatting.py::test_aliased_as_subquery",
"tests/integration/test_formatting.py::test_is_not_null_condition",
"tests/integration/test_formatting.py::test_filter_not_predicate",
"tests/integration/test_formatting.py::test_boolean_filter_not_predicate",
"tests/integration/test_formatting.py::test_boolean_filter_first_not_predicate",
"tests/integration/test_formatting.py::test_basic_join",
"tests/integration/test_formatting.py::test_parenthesis_join",
"tests/integration/test_formatting.py::test_join_on_clause",
"tests/integration/test_formatting.py::test_join_long_on_clause",
"tests/integration/test_formatting.py::test_join_long_on_clause_with_parenthesis",
"tests/integration/test_formatting.py::test_join_on_clause_boolean",
"tests/integration/test_formatting.py::test_two_parenthesis_joins_with_group_by",
"tests/integration/test_formatting.py::test_nested_joins",
"tests/integration/test_formatting.py::test_parenthesis_join_subquery",
"tests/integration/test_formatting.py::test_partitioning_function",
"tests/integration/test_formatting.py::test_partitioning_function_multiple_params",
"tests/integration/test_formatting.py::test_partitioning_function_multiple_params_with_frame",
"tests/integration/test_formatting.py::test_partitioning_function_order_by",
"tests/integration/test_formatting.py::test_partitioning_function_order_by_frame",
"tests/integration/test_formatting.py::test_partitioning_function_order_by_multiple",
"tests/integration/test_formatting.py::test_partitioning_function_order_by_no_partition",
"tests/integration/test_formatting.py::test_partitioning_function_order_by_no_partition_with_frame",
"tests/integration/test_formatting.py::test_partitioning_function_no_order_with_frame",
"tests/integration/test_formatting.py::test_partitioning_function_equals_with_alias",
"tests/integration/test_formatting.py::test_partitioning_function_empty",
"tests/integration/test_formatting.py::test_long_function",
"tests/integration/test_formatting.py::test_date_functions_field",
"tests/integration/test_formatting.py::test_case_expr",
"tests/integration/test_formatting.py::test_case_expr_multiple_fields",
"tests/integration/test_formatting.py::test_case",
"tests/integration/test_formatting.py::test_case_multiple_fields",
"tests/integration/test_formatting.py::test_case_no_else",
"tests/integration/test_formatting.py::test_index_access",
"tests/integration/test_formatting.py::test_array_with_subquery",
"tests/integration/test_formatting.py::test_array_with_subquery_multiple_args",
"tests/integration/test_formatting.py::test_unnest_with_offset",
"tests/integration/test_formatting.py::test_unnest_with_offset_alias",
"tests/integration/test_formatting.py::test_unnest_with_offset_both_aliased",
"tests/integration/test_formatting.py::test_function_calls",
"tests/integration/test_formatting.py::test_regex",
"tests/integration/test_formatting.py::test_multiple_regexes",
"tests/integration/test_formatting.py::test_parentheses_nested_select",
"tests/integration/test_formatting.py::test_union",
"tests/integration/test_formatting.py::test_intersect",
"tests/integration/test_formatting.py::test_except",
"tests/integration/test_formatting.py::test_query_combinations",
"tests/integration/test_formatting.py::test_long_function_calls",
"tests/integration/test_formatting.py::test_long_function_calls_multiple_fields",
"tests/integration/test_formatting.py::test_capitalized_column_name",
"tests/integration/test_formatting.py::test_capitalized_alias",
"tests/integration/test_formatting.py::test_break_long_where",
"tests/integration/test_formatting.py::test_break_long_where_nested_operations",
"tests/integration/test_formatting.py::test_break_long_parenthesis_where_nested_operations",
"tests/integration/test_formatting.py::test_cast_type",
"tests/integration/test_formatting.py::test_cast_in_condition",
"tests/integration/test_formatting.py::test_date_function",
"tests/integration/test_formatting.py::test_select_boolean_condition_expression",
"tests/integration/test_formatting.py::test_bitwise_operation",
"tests/integration/test_formatting.py::test_cross_join",
"tests/integration/test_formatting.py::test_implicit_cross_join",
"tests/integration/test_formatting.py::test_implicit_cross_join_unnest",
"tests/integration/test_formatting.py::test_multiple_joins_unnest",
"tests/integration/test_formatting.py::test_nested_multiple_joins_unnest",
"tests/integration/test_formatting.py::test_having_boolean_clause",
"tests/integration/test_formatting.py::test_if_with_date_column_name",
"tests/integration/test_formatting.py::test_long_nested_where",
"tests/integration/test_formatting.py::test_long_case_when_conditions_line_breaks",
"tests/integration/test_formatting.py::test_long_case_when_conditions_line_breaks_multi_column",
"tests/integration/test_formatting.py::test_where_in_subquery",
"tests/integration/test_formatting.py::test_table_in_square_brackets",
"tests/integration/test_formatting.py::test_union_all_nested_query",
"tests/integration/test_formatting.py::test_array",
"tests/integration/test_formatting.py::test_chained_field",
"tests/integration/test_formatting.py::test_multiple_table_name_with_cross_join",
"tests/integration/test_formatting.py::test_multiple_table_name_with_multiple_joins",
"tests/integration/test_formatting.py::test_where_with_offset_column",
"tests/integration/test_formatting.py::test_struct",
"tests/integration/test_formatting.py::test_array_agg",
"tests/integration/test_formatting.py::test_array_agg_order_by",
"tests/integration/test_formatting.py::test_distinct_array_agg_order_by",
"tests/integration/test_formatting.py::test_where_boolean_followed_by_group",
"tests/integration/test_formatting.py::test_date_column_in_function_call",
"tests/integration/test_formatting.py::test_floating_point_number",
"tests/integration/test_formatting.py::test_negative_floating_point_number",
"tests/integration/test_formatting.py::test_where_not_in_subquery",
"tests/integration/test_formatting.py::test_contains_predicate",
"tests/integration/test_formatting.py::test_bitwise_shifting",
"tests/integration/test_formatting.py::test_quoted_table_name",
"tests/integration/test_formatting.py::test_long_quoted_table_name",
"tests/integration/test_formatting.py::test_extract_date",
"tests/integration/test_formatting.py::test_count_distinct",
"tests/integration/test_formatting.py::test_count_distinct_with_function",
"tests/integration/test_formatting.py::test_count_equal_countif",
"tests/integration/test_formatting.py::test_invalid_join_missing_using_or_on",
"tests/integration/test_formatting.py::test_string_alias",
"tests/integration/test_formatting.py::test_index_chained_column",
"tests/integration/test_formatting.py::test_group_each_by",
"tests/integration/test_formatting.py::test_plain_join_each",
"tests/integration/test_formatting.py::test_escaped_char",
"tests/integration/test_formatting.py::test_filter_where",
"tests/integration/test_formatting.py::test_filter_where_alias",
"tests/integration/test_formatting.py::test_filter_where_alias_as",
"tests/integration/test_formatting.py::test_with_statement",
"tests/integration/test_formatting.py::test_multiple_with_statements",
"tests/integration/test_formatting.py::test_nested_with_statement",
"tests/integration/test_formatting.py::test_function_with_index_access",
"tests/integration/test_formatting.py::test_multiple_index_accesses",
"tests/integration/test_formatting.py::test_string_containing_quote",
"tests/integration/test_formatting.py::test_select_star_replace",
"tests/integration/test_formatting.py::test_select_star_multiple_replace",
"tests/integration/test_formatting.py::test_select_star_replace_nospace"
] | [] | MIT License | 12,640 | 344 | [
"sqlvalidator/grammar/tokeniser.py"
] |
|
python-cmd2__cmd2-1224 | 96acc5a36233f17edcb0925ddf4ff382a0fa0f77 | 2022-04-18 16:45:48 | 16c6d30dadbdfc273ae4bd075f861f54801290cd | codecov[bot]: # [Codecov](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2) Report
> Merging [#1224](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2) (00b2a3f) into [master](https://codecov.io/gh/python-cmd2/cmd2/commit/96acc5a36233f17edcb0925ddf4ff382a0fa0f77?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2) (96acc5a) will **increase** coverage by `0.00%`.
> The diff coverage is `94.73%`.
```diff
@@ Coverage Diff @@
## master #1224 +/- ##
=======================================
Coverage 98.52% 98.52%
=======================================
Files 22 22
Lines 5695 5698 +3
=======================================
+ Hits 5611 5614 +3
Misses 84 84
```
| [Impacted Files](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2) | Coverage Δ | |
|---|---|---|
| [cmd2/utils.py](https://codecov.io/gh/python-cmd2/cmd2/pull/1224/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2#diff-Y21kMi91dGlscy5weQ==) | `97.98% <93.75%> (-0.15%)` | :arrow_down: |
| [cmd2/cmd2.py](https://codecov.io/gh/python-cmd2/cmd2/pull/1224/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2#diff-Y21kMi9jbWQyLnB5) | `98.20% <100.00%> (+0.03%)` | :arrow_up: |
| [cmd2/decorators.py](https://codecov.io/gh/python-cmd2/cmd2/pull/1224/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2#diff-Y21kMi9kZWNvcmF0b3JzLnB5) | `100.00% <100.00%> (ø)` | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2). Last update [96acc5a...00b2a3f](https://codecov.io/gh/python-cmd2/cmd2/pull/1224?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=python-cmd2).
| diff --git a/cmd2/cmd2.py b/cmd2/cmd2.py
index bd29e122..72b26566 100644
--- a/cmd2/cmd2.py
+++ b/cmd2/cmd2.py
@@ -139,6 +139,7 @@ from .table_creator import (
from .utils import (
Settable,
get_defining_class,
+ strip_doc_annotations,
)
# Set up readline
@@ -3808,23 +3809,7 @@ class Cmd(cmd.Cmd):
doc = cmd_func.__doc__
# Attempt to locate the first documentation block
- cmd_desc = ''
- if doc:
- found_first = False
- for doc_line in doc.splitlines():
- stripped_line = doc_line.strip()
-
- # Don't include :param type lines
- if stripped_line.startswith(':'):
- if found_first:
- break
- elif stripped_line:
- if found_first:
- cmd_desc += "\n"
- cmd_desc += stripped_line
- found_first = True
- elif found_first:
- break
+ cmd_desc = strip_doc_annotations(doc) if doc else ''
# Add this command to the table
table_row = topic_table.generate_data_row([command, cmd_desc])
diff --git a/cmd2/decorators.py b/cmd2/decorators.py
index e1aac3cf..4540af8b 100644
--- a/cmd2/decorators.py
+++ b/cmd2/decorators.py
@@ -29,6 +29,9 @@ from .exceptions import (
from .parsing import (
Statement,
)
+from .utils import (
+ strip_doc_annotations,
+)
if TYPE_CHECKING: # pragma: no cover
import cmd2
@@ -384,7 +387,7 @@ def with_argparser(
# If the description has not been set, then use the method docstring if one exists
if parser.description is None and func.__doc__:
- parser.description = func.__doc__
+ parser.description = strip_doc_annotations(func.__doc__)
# Set the command's help text as argparser.description (which can be None)
cmd_wrapper.__doc__ = parser.description
diff --git a/cmd2/utils.py b/cmd2/utils.py
index 855ad23e..5856b41a 100644
--- a/cmd2/utils.py
+++ b/cmd2/utils.py
@@ -1228,3 +1228,29 @@ class CustomCompletionSettings:
"""
self.parser = parser
self.preserve_quotes = preserve_quotes
+
+
+def strip_doc_annotations(doc: str) -> str:
+ """
+ Strip annotations from a docstring leaving only the text description
+
+ :param doc: documentation string
+ """
+ # Attempt to locate the first documentation block
+ cmd_desc = ''
+ found_first = False
+ for doc_line in doc.splitlines():
+ stripped_line = doc_line.strip()
+
+ # Don't include :param type lines
+ if stripped_line.startswith(':'):
+ if found_first:
+ break
+ elif stripped_line:
+ if found_first:
+ cmd_desc += "\n"
+ cmd_desc += stripped_line
+ found_first = True
+ elif found_first:
+ break
+ return cmd_desc
diff --git a/examples/arg_decorators.py b/examples/arg_decorators.py
index 7b1e2941..3b02835e 100755
--- a/examples/arg_decorators.py
+++ b/examples/arg_decorators.py
@@ -48,7 +48,11 @@ class ArgparsingApp(cmd2.Cmd):
@cmd2.with_argparser(pow_parser)
def do_pow(self, args: argparse.Namespace) -> None:
- """Raise an integer to a small integer exponent, either positive or negative"""
+ """
+ Raise an integer to a small integer exponent, either positive or negative
+
+ :param args: argparse arguments
+ """
self.poutput('{} ** {} == {}'.format(args.base, args.exponent, args.base**args.exponent))
| command function type annotations are pulled into command help
In `with_argparser`, when an explicit description is not provided, we pull the function's `__doc__` property as the description. It currently pulls the entire `__doc__` property, including any documentation-generation annotations. It would be cleaner to omit the annotations.
Example:
```
@with_argparser(foo_args)
def do_foo(self, args: Namespace) -> None:
"""
Does foo stuff
:param args: doc-annotation for args
:raises MyException: some notes about exceptions
"""
```
Results in something like:
```
> help foo
Usage: foo
Does foo stuff
:param args: doc-annotation for args
:raises MyException: some notes about exceptions
```
And it would be cleaner like this:
```
> help foo
Usage: foo
Does foo stuff
```
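For illustration, a standalone sketch of the stripping behaviour that addresses this (it mirrors the `strip_doc_annotations` helper added in the patch above, but is written as a plain function for clarity, not as the library API):
```python
def strip_doc_annotations(doc: str) -> str:
    """Keep only the leading description block, dropping ':param'-style lines."""
    description_lines = []
    found_first = False
    for line in doc.splitlines():
        stripped = line.strip()
        if stripped.startswith(":"):
            # An annotation reached after the description ends the block.
            if found_first:
                break
        elif stripped:
            description_lines.append(stripped)
            found_first = True
        elif found_first:
            # A blank line after the description also ends the block.
            break
    return "\n".join(description_lines)


print(strip_doc_annotations(
    "Does foo stuff\n\n:param args: doc-annotation for args\n:raises MyException: some notes\n"
))  # -> Does foo stuff
```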
| python-cmd2/cmd2 | diff --git a/tests/test_argparse.py b/tests/test_argparse.py
index be5e0e72..070b506a 100644
--- a/tests/test_argparse.py
+++ b/tests/test_argparse.py
@@ -35,7 +35,13 @@ class ArgparseApp(cmd2.Cmd):
@cmd2.with_argparser(say_parser)
def do_say(self, args, *, keyword_arg: Optional[str] = None):
- """Repeat what you tell me to."""
+ """
+ Repeat what you
+ tell me to.
+
+ :param args: argparse namespace
+ :param keyword_arg: Optional keyword arguments
+ """
words = []
for word in args.words:
if word is None:
@@ -198,7 +204,10 @@ def test_argparse_help_docstring(argparse_app):
out, err = run_cmd(argparse_app, 'help say')
assert out[0].startswith('Usage: say')
assert out[1] == ''
- assert out[2] == 'Repeat what you tell me to.'
+ assert out[2] == 'Repeat what you'
+ assert out[3] == 'tell me to.'
+ for line in out:
+ assert not line.startswith(':')
def test_argparse_help_description(argparse_app):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | 2.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
anyio==4.9.0
argcomplete==3.6.1
attrs==25.3.0
babel==2.17.0
backports.tarfile==1.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
click==8.1.8
-e git+https://github.com/python-cmd2/cmd2.git@96acc5a36233f17edcb0925ddf4ff382a0fa0f77#egg=cmd2
codecov==2.1.13
colorama==0.4.6
colorlog==6.9.0
coverage==7.8.0
cryptography==44.0.2
dependency-groups==1.3.0
distlib==0.3.9
doc8==1.1.2
docutils==0.21.2
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
h11==0.14.0
id==1.5.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
invoke==2.2.0
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jeepney==0.9.0
Jinja2==3.1.6
keyring==25.6.0
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
more-itertools==10.6.0
mypy==0.902
mypy_extensions==0.4.4
nh3==0.2.21
nox==2025.2.9
packaging==24.2
pbr==6.1.1
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pycparser==2.22
pyflakes==3.3.1
Pygments==2.19.1
pyperclip==1.9.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-mock==3.14.0
readme_renderer==44.0
requests==2.32.3
requests-toolbelt==1.0.0
restructuredtext_lint==1.4.0
rfc3986==2.0.0
rich==14.0.0
SecretStorage==3.3.3
sniffio==1.3.1
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinx-autobuild==2024.10.3
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
starlette==0.46.1
stevedore==5.4.1
toml==0.10.2
tomli==2.2.1
twine==6.1.0
typing_extensions==4.13.0
urllib3==2.3.0
uvicorn==0.34.0
virtualenv==20.29.3
watchfiles==1.0.4
wcwidth==0.2.13
websockets==15.0.1
zipp==3.21.0
| name: cmd2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- anyio==4.9.0
- argcomplete==3.6.1
- attrs==25.3.0
- babel==2.17.0
- backports-tarfile==1.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- click==8.1.8
- cmd2==2.4.1
- codecov==2.1.13
- colorama==0.4.6
- colorlog==6.9.0
- coverage==7.8.0
- cryptography==44.0.2
- dependency-groups==1.3.0
- distlib==0.3.9
- doc8==1.1.2
- docutils==0.21.2
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- h11==0.14.0
- id==1.5.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- invoke==2.2.0
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jeepney==0.9.0
- jinja2==3.1.6
- keyring==25.6.0
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- more-itertools==10.6.0
- mypy==0.902
- mypy-extensions==0.4.4
- nh3==0.2.21
- nox==2025.2.9
- packaging==24.2
- pbr==6.1.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pycparser==2.22
- pyflakes==3.3.1
- pygments==2.19.1
- pyperclip==1.9.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- readme-renderer==44.0
- requests==2.32.3
- requests-toolbelt==1.0.0
- restructuredtext-lint==1.4.0
- rfc3986==2.0.0
- rich==14.0.0
- secretstorage==3.3.3
- sniffio==1.3.1
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinx-autobuild==2024.10.3
- sphinx-rtd-theme==3.0.2
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- starlette==0.46.1
- stevedore==5.4.1
- toml==0.10.2
- tomli==2.2.1
- twine==6.1.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- uvicorn==0.34.0
- virtualenv==20.29.3
- watchfiles==1.0.4
- wcwidth==0.2.13
- websockets==15.0.1
- zipp==3.21.0
prefix: /opt/conda/envs/cmd2
| [
"tests/test_argparse.py::test_argparse_help_docstring"
] | [] | [
"tests/test_argparse.py::test_invalid_syntax",
"tests/test_argparse.py::test_argparse_basic_command",
"tests/test_argparse.py::test_argparse_remove_quotes",
"tests/test_argparse.py::test_argparser_kwargs",
"tests/test_argparse.py::test_argparse_preserve_quotes",
"tests/test_argparse.py::test_argparse_custom_namespace",
"tests/test_argparse.py::test_argparse_with_list",
"tests/test_argparse.py::test_argparse_with_list_remove_quotes",
"tests/test_argparse.py::test_argparse_with_list_preserve_quotes",
"tests/test_argparse.py::test_argparse_with_list_custom_namespace",
"tests/test_argparse.py::test_argparse_with_list_and_empty_doc",
"tests/test_argparse.py::test_argparser_correct_args_with_quotes_and_midline_options",
"tests/test_argparse.py::test_argparser_and_unknown_args_kwargs",
"tests/test_argparse.py::test_argparse_quoted_arguments_multiple",
"tests/test_argparse.py::test_argparse_help_description",
"tests/test_argparse.py::test_argparse_prog",
"tests/test_argparse.py::test_arglist",
"tests/test_argparse.py::test_arglist_kwargs",
"tests/test_argparse.py::test_preservelist",
"tests/test_argparse.py::test_subcommand_foo",
"tests/test_argparse.py::test_subcommand_bar",
"tests/test_argparse.py::test_subcommand_invalid",
"tests/test_argparse.py::test_subcommand_base_help",
"tests/test_argparse.py::test_subcommand_help",
"tests/test_argparse.py::test_subcommand_invalid_help",
"tests/test_argparse.py::test_add_another_subcommand",
"tests/test_argparse.py::test_subcmd_decorator",
"tests/test_argparse.py::test_unittest_mock",
"tests/test_argparse.py::test_pytest_mock_invalid",
"tests/test_argparse.py::test_pytest_mock_valid[spec_param0]",
"tests/test_argparse.py::test_pytest_mock_valid[spec_param1]",
"tests/test_argparse.py::test_pytest_mock_valid[spec_param2]"
] | [] | MIT License | 12,653 | 961 | [
"cmd2/cmd2.py",
"cmd2/decorators.py",
"cmd2/utils.py",
"examples/arg_decorators.py"
] |
pycontribs__jira-1364 | e00c1f7b0133ee43bccd698a7c925cb6e5b37db1 | 2022-04-19 21:52:26 | 5515bf38bd2bc3f573f8aca1c26378c7cc20a5d5 | diff --git a/jira/resilientsession.py b/jira/resilientsession.py
index dbd6b9c..ec01b6f 100644
--- a/jira/resilientsession.py
+++ b/jira/resilientsession.py
@@ -88,7 +88,7 @@ def raise_on_error(r: Optional[Response], verb="???", **kwargs):
class ResilientSession(Session):
"""This class is supposed to retry requests that do return temporary errors.
- At this moment it supports: 502, 503, 504
+ At this moment it supports: 429
"""
def __init__(self, timeout=None):
@@ -113,11 +113,23 @@ class ResilientSession(Session):
f"Got ConnectionError [{response}] errno:{response.errno} on {request} {url}\n{vars(response)}\n{response.__dict__}"
)
if isinstance(response, Response):
- if response.status_code in [502, 503, 504, 401]:
- # 401 UNAUTHORIZED still randomly returned by Atlassian Cloud as of 2017-01-16
+ if response.status_code in [429]:
+ number_of_tokens_issued_per_interval = response.headers[
+ "X-RateLimit-FillRate"
+ ]
+ token_issuing_rate_interval_seconds = response.headers[
+ "X-RateLimit-Interval-Seconds"
+ ]
+ maximum_number_of_tokens = response.headers["X-RateLimit-Limit"]
+ retry_after = response.headers["retry-after"]
msg = f"{response.status_code} {response.reason}"
- # 2019-07-25: Disabled recovery for codes above^
- return False
+ logging.warning(
+ f"Request rate limited by Jira: request should be retried after {retry_after} seconds.\n"
+ + f"{number_of_tokens_issued_per_interval} tokens are issued every {token_issuing_rate_interval_seconds} seconds. "
+ + f"You can accumulate up to {maximum_number_of_tokens} tokens.\n"
+ + "Consider adding an exemption for the user as explained in: "
+ + "https://confluence.atlassian.com/adminjiraserver/improving-instance-stability-with-rate-limiting-983794911.html"
+ )
elif not (
response.status_code == 200
and len(response.content) == 0
| Handle 429 rate limiting messages from Jira
**Is your feature request related to a problem? Please describe.**
Atlassian recommends usage of rate limiting with Jira in this blog post: https://confluence.atlassian.com/adminjiraserver/improving-instance-stability-with-rate-limiting-983794911.html
This would result in getting a 429 status code. [From what I can tell](https://github.com/pycontribs/jira/blob/91461fd736cd9a37cc136e07402c6f0b1e60170b/jira/resilientsession.py#L100) this library will never retry these requests.
**Describe the solution you'd like**
Use the existing exponential backoff logic to (eventually) retry a request that was rate limited.
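Purely as an illustration of what such a retry could look like (this is a sketch, not the library's actual implementation; the header and back-off choices here are assumptions):
```python
import time

import requests


def get_with_429_retry(session: requests.Session, url: str, max_retries: int = 3) -> requests.Response:
    """Retry a GET that came back 429, waiting as long as the server asks (or backing off exponentially)."""
    response = session.get(url)
    for attempt in range(max_retries):
        if response.status_code != 429:
            break
        # Jira's rate limiter advertises how long to wait via the Retry-After header.
        delay = float(response.headers.get("Retry-After", 2 ** attempt))
        time.sleep(delay)
        response = session.get(url)
    return response
```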
**Describe alternatives you've considered**
I have not really considered any alternative here.
**Additional context**
Admittedly, this may be handled elsewhere and I may have missed it; I have not rolled out the rate limiting yet, so I do not have real-world testing here. | pycontribs/jira | diff --git a/tests/test_resilientsession.py b/tests/test_resilientsession.py
index 831321c..023fc5c 100644
--- a/tests/test_resilientsession.py
+++ b/tests/test_resilientsession.py
@@ -1,6 +1,11 @@
import logging
+from unittest.mock import Mock, patch
+
+import pytest
+from requests import Response
import jira.resilientsession
+from jira.exceptions import JIRAError
from tests.conftest import JiraTestCase
@@ -53,3 +58,43 @@ class ResilientSessionLoggingConfidentialityTests(JiraTestCase):
def tearDown(self):
jira.resilientsession.logging.getLogger().removeHandler(self.loggingHandler)
del self.loggingHandler
+
+
+status_codes_retries_test_data = [
+ (429, 4, 3),
+ (401, 1, 0),
+ (403, 1, 0),
+ (404, 1, 0),
+ (502, 1, 0),
+ (503, 1, 0),
+ (504, 1, 0),
+]
+
+
+@patch("requests.Session.get")
+@patch("time.sleep")
[email protected](
+ "status_code,expected_number_of_retries,expected_number_of_sleep_invocations",
+ status_codes_retries_test_data,
+)
+def test_status_codes_retries(
+ mocked_sleep_method: Mock,
+ mocked_get_method: Mock,
+ status_code: int,
+ expected_number_of_retries: int,
+ expected_number_of_sleep_invocations: int,
+):
+ mocked_response: Response = Response()
+ mocked_response.status_code = status_code
+ mocked_response.headers["X-RateLimit-FillRate"] = "1"
+ mocked_response.headers["X-RateLimit-Interval-Seconds"] = "1"
+ mocked_response.headers["retry-after"] = "1"
+ mocked_response.headers["X-RateLimit-Limit"] = "1"
+ mocked_get_method.return_value = mocked_response
+ session: jira.resilientsession.ResilientSession = (
+ jira.resilientsession.ResilientSession()
+ )
+ with pytest.raises(JIRAError):
+ session.get("mocked_url")
+ assert mocked_get_method.call_count == expected_number_of_retries
+ assert mocked_sleep_method.call_count == expected_number_of_sleep_invocations
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 3.2 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[opt,cli,test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
backports.tarfile==1.2.0
certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
coverage==7.8.0
cryptography==44.0.2
decorator==5.2.1
defusedxml==0.7.1
docutils==0.21.2
exceptiongroup==1.2.2
execnet==2.1.1
executing==2.2.0
filemagic==1.6
flaky==3.8.1
gssapi==1.9.0
idna==3.10
importlib_metadata==8.6.1
iniconfig==2.1.0
ipython==8.18.1
jaraco.classes==3.4.0
jaraco.context==6.0.1
jaraco.functools==4.1.0
jedi==0.19.2
jeepney==0.9.0
-e git+https://github.com/pycontribs/jira.git@e00c1f7b0133ee43bccd698a7c925cb6e5b37db1#egg=jira
keyring==25.6.0
krb5==0.7.1
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
more-itertools==10.6.0
oauthlib==3.2.2
packaging==24.2
parso==0.8.4
pexpect==4.9.0
pluggy==1.5.0
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
py==1.11.0
pycparser==2.22
Pygments==2.19.1
PyJWT==2.10.1
pyspnego==0.11.2
pytest==8.3.5
pytest-cache==1.0
pytest-cov==6.0.0
pytest-instafail==0.5.0
pytest-sugar==1.0.0
pytest-timeout==2.3.1
pytest-xdist==3.6.1
PyYAML==6.0.2
requests==2.32.3
requests-jwt==0.6.0
requests-kerberos==0.15.0
requests-mock==1.12.1
requests-oauthlib==2.0.0
requests-toolbelt==1.0.0
requires.io==0.2.6
SecretStorage==3.3.3
stack-data==0.6.3
tenacity==9.0.0
termcolor==2.5.0
tomli==2.2.1
traitlets==5.14.3
typing_extensions==4.13.0
urllib3==2.3.0
wcwidth==0.2.13
xmlrunner==1.7.7
yanc==0.3.3
zipp==3.21.0
| name: jira
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- backports-tarfile==1.2.0
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- coverage==7.8.0
- cryptography==44.0.2
- decorator==5.2.1
- defusedxml==0.7.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- execnet==2.1.1
- executing==2.2.0
- filemagic==1.6
- flaky==3.8.1
- gssapi==1.9.0
- idna==3.10
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipython==8.18.1
- jaraco-classes==3.4.0
- jaraco-context==6.0.1
- jaraco-functools==4.1.0
- jedi==0.19.2
- jeepney==0.9.0
- jira==3.2.1.dev5+ge00c1f7
- keyring==25.6.0
- krb5==0.7.1
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- more-itertools==10.6.0
- oauthlib==3.2.2
- packaging==24.2
- parso==0.8.4
- pexpect==4.9.0
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- py==1.11.0
- pycparser==2.22
- pygments==2.19.1
- pyjwt==2.10.1
- pyspnego==0.11.2
- pytest==8.3.5
- pytest-cache==1.0
- pytest-cov==6.0.0
- pytest-instafail==0.5.0
- pytest-sugar==1.0.0
- pytest-timeout==2.3.1
- pytest-xdist==3.6.1
- pyyaml==6.0.2
- requests==2.32.3
- requests-jwt==0.6.0
- requests-kerberos==0.15.0
- requests-mock==1.12.1
- requests-oauthlib==2.0.0
- requests-toolbelt==1.0.0
- requires-io==0.2.6
- secretstorage==3.3.3
- stack-data==0.6.3
- tenacity==9.0.0
- termcolor==2.5.0
- tomli==2.2.1
- traitlets==5.14.3
- typing-extensions==4.13.0
- urllib3==2.3.0
- wcwidth==0.2.13
- xmlrunner==1.7.7
- yanc==0.3.3
- zipp==3.21.0
prefix: /opt/conda/envs/jira
| [
"tests/test_resilientsession.py::test_status_codes_retries[429-4-3]"
] | [] | [
"tests/test_resilientsession.py::ResilientSessionLoggingConfidentialityTests::test_logging_with_connection_error",
"tests/test_resilientsession.py::test_status_codes_retries[401-1-0]",
"tests/test_resilientsession.py::test_status_codes_retries[403-1-0]",
"tests/test_resilientsession.py::test_status_codes_retries[404-1-0]",
"tests/test_resilientsession.py::test_status_codes_retries[502-1-0]",
"tests/test_resilientsession.py::test_status_codes_retries[503-1-0]",
"tests/test_resilientsession.py::test_status_codes_retries[504-1-0]"
] | [] | BSD 2-Clause "Simplified" License | 12,663 | 580 | [
"jira/resilientsession.py"
] |
|
googleapis__python-pubsub-662 | 1a07d7ce3b3580191f74b7895dd1b8afb13baccb | 2022-04-19 21:54:18 | 950fbce009fd56a55feea971f8e6083fa84d54fc | diff --git a/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py b/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py
index 9161616..c6dbf06 100644
--- a/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py
+++ b/google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py
@@ -28,6 +28,9 @@ from google.api_core.retry import exponential_sleep_generator
from google.cloud.pubsub_v1.subscriber._protocol import helper_threads
from google.cloud.pubsub_v1.subscriber._protocol import requests
+from google.cloud.pubsub_v1.subscriber.exceptions import (
+ AcknowledgeStatus,
+)
if typing.TYPE_CHECKING: # pragma: NO COVER
import queue
@@ -128,17 +131,50 @@ class Dispatcher(object):
nack_requests: List[requests.NackRequest] = []
drop_requests: List[requests.DropRequest] = []
+ lease_ids = set()
+ modack_ids = set()
+ ack_ids = set()
+ nack_ids = set()
+ drop_ids = set()
+ exactly_once_delivery_enabled = self._manager._exactly_once_delivery_enabled()
+
for item in items:
if isinstance(item, requests.LeaseRequest):
- lease_requests.append(item)
+ if (
+ item.ack_id not in lease_ids
+ ): # LeaseRequests have no futures to handle.
+ lease_ids.add(item.ack_id)
+ lease_requests.append(item)
elif isinstance(item, requests.ModAckRequest):
- modack_requests.append(item)
+ if item.ack_id in modack_ids:
+ self._handle_duplicate_request_future(
+ exactly_once_delivery_enabled, item
+ )
+ else:
+ modack_ids.add(item.ack_id)
+ modack_requests.append(item)
elif isinstance(item, requests.AckRequest):
- ack_requests.append(item)
+ if item.ack_id in ack_ids:
+ self._handle_duplicate_request_future(
+ exactly_once_delivery_enabled, item
+ )
+ else:
+ ack_ids.add(item.ack_id)
+ ack_requests.append(item)
elif isinstance(item, requests.NackRequest):
- nack_requests.append(item)
+ if item.ack_id in nack_ids:
+ self._handle_duplicate_request_future(
+ exactly_once_delivery_enabled, item
+ )
+ else:
+ nack_ids.add(item.ack_id)
+ nack_requests.append(item)
elif isinstance(item, requests.DropRequest):
- drop_requests.append(item)
+ if (
+ item.ack_id not in drop_ids
+ ): # DropRequests have no futures to handle.
+ drop_ids.add(item.ack_id)
+ drop_requests.append(item)
else:
warnings.warn(
f'Skipping unknown request item of type "{type(item)}"',
@@ -164,6 +200,29 @@ class Dispatcher(object):
if drop_requests:
self.drop(drop_requests)
+ def _handle_duplicate_request_future(
+ self,
+ exactly_once_delivery_enabled: bool,
+ item: Union[requests.AckRequest, requests.ModAckRequest, requests.NackRequest],
+ ) -> None:
+ _LOGGER.debug(
+ "This is a duplicate %s with the same ack_id: %s.",
+ type(item),
+ item.ack_id,
+ )
+ if item.future:
+ if exactly_once_delivery_enabled:
+ item.future.set_exception(
+ ValueError(f"Duplicate ack_id for {type(item)}")
+ )
+ # Futures may be present even with exactly-once delivery
+ # disabled, in transition periods after the setting is changed on
+ # the subscription.
+ else:
+ # When exactly-once delivery is NOT enabled, acks/modacks are considered
+ # best-effort, so the future should succeed even though this is a duplicate.
+ item.future.set_result(AcknowledgeStatus.SUCCESS)
+
def ack(self, items: Sequence[requests.AckRequest]) -> None:
"""Acknowledge the given messages.
| Assert fails with duplicate AckRequests
#### Steps to reproduce
1. Ack the same message multiple times on a streaming pull subscription.
This assertion fails when ack duplication occurs: https://github.com/googleapis/python-pubsub/blob/20d661c8562cc1f777ac7b3f1ba03dcad7a831c0/google/cloud/pubsub_v1/subscriber/_protocol/streaming_pull_manager.py#L588
There is no de-duplication of acks in the dispatcher, and because ack_reqs_dict is a Dict[str, AckRequest], the latest AckRequest will replace an earlier request.
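A toy illustration of that overwrite (the `AckRequest` used here is a hypothetical namedtuple stand-in, not the library's class):
```python
from collections import namedtuple

AckRequest = namedtuple("AckRequest", ["ack_id", "future"])

ack_reqs_dict = {}
for req in (AckRequest("ack-1", "future-A"), AckRequest("ack-1", "future-B")):
    ack_reqs_dict[req.ack_id] = req  # the second insert silently replaces the first

print(ack_reqs_dict)
# {'ack-1': AckRequest(ack_id='ack-1', future='future-B')} -- the request carrying
# "future-A" is gone, so bookkeeping that expected one entry per ack no longer adds up.
```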
The solution is either to change ack_reqs_dict to allow for multiple AckRequests for the same message, or to de-duplicate acks in the dispatcher. | googleapis/python-pubsub | diff --git a/tests/unit/pubsub_v1/subscriber/test_dispatcher.py b/tests/unit/pubsub_v1/subscriber/test_dispatcher.py
index c1de19e..c6902da 100644
--- a/tests/unit/pubsub_v1/subscriber/test_dispatcher.py
+++ b/tests/unit/pubsub_v1/subscriber/test_dispatcher.py
@@ -24,6 +24,9 @@ from google.cloud.pubsub_v1.subscriber import futures
import mock
import pytest
+from google.cloud.pubsub_v1.subscriber.exceptions import (
+ AcknowledgeStatus,
+)
@pytest.mark.parametrize(
@@ -48,6 +51,7 @@ def test_dispatch_callback_active_manager(item, method_name):
dispatcher_.dispatch_callback(items)
method.assert_called_once_with([item])
+ manager._exactly_once_delivery_enabled.assert_called()
@pytest.mark.parametrize(
@@ -73,6 +77,274 @@ def test_dispatch_callback_inactive_manager(item, method_name):
dispatcher_.dispatch_callback(items)
method.assert_called_once_with([item])
+ manager._exactly_once_delivery_enabled.assert_called()
+
+
[email protected](
+ "items,method_name",
+ [
+ (
+ [
+ requests.AckRequest("0", 0, 0, "", None),
+ requests.AckRequest("0", 0, 1, "", None),
+ ],
+ "ack",
+ ),
+ (
+ [
+ requests.DropRequest("0", 0, ""),
+ requests.DropRequest("0", 1, ""),
+ ],
+ "drop",
+ ),
+ (
+ [
+ requests.LeaseRequest("0", 0, ""),
+ requests.LeaseRequest("0", 1, ""),
+ ],
+ "lease",
+ ),
+ (
+ [
+ requests.ModAckRequest("0", 0, None),
+ requests.ModAckRequest("0", 1, None),
+ ],
+ "modify_ack_deadline",
+ ),
+ (
+ [
+ requests.NackRequest("0", 0, "", None),
+ requests.NackRequest("0", 1, "", None),
+ ],
+ "nack",
+ ),
+ ],
+)
+def test_dispatch_duplicate_items_callback_active_manager_no_futures(
+ items, method_name
+):
+ manager = mock.create_autospec(
+ streaming_pull_manager.StreamingPullManager, instance=True
+ )
+ dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)
+
+ manager._exactly_once_delivery_enabled.return_value = False
+ with mock.patch.object(dispatcher_, method_name) as method:
+ dispatcher_.dispatch_callback(items)
+
+ method.assert_called_once_with([items[0]])
+ manager._exactly_once_delivery_enabled.assert_called()
+
+
[email protected](
+ "items,method_name",
+ [
+ (
+ [
+ requests.AckRequest("0", 0, 0, "", None),
+ requests.AckRequest("0", 0, 1, "", futures.Future()),
+ ],
+ "ack",
+ ),
+ (
+ [
+ requests.DropRequest("0", 0, ""),
+ requests.DropRequest("0", 1, ""),
+ ],
+ "drop",
+ ),
+ (
+ [
+ requests.LeaseRequest("0", 0, ""),
+ requests.LeaseRequest("0", 1, ""),
+ ],
+ "lease",
+ ),
+ (
+ [
+ requests.ModAckRequest("0", 0, None),
+ requests.ModAckRequest("0", 1, futures.Future()),
+ ],
+ "modify_ack_deadline",
+ ),
+ (
+ [
+ requests.NackRequest("0", 0, "", None),
+ requests.NackRequest("0", 1, "", futures.Future()),
+ ],
+ "nack",
+ ),
+ ],
+)
+def test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod(
+ items, method_name
+):
+ manager = mock.create_autospec(
+ streaming_pull_manager.StreamingPullManager, instance=True
+ )
+ dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)
+
+ manager._exactly_once_delivery_enabled.return_value = False
+ with mock.patch.object(dispatcher_, method_name) as method:
+ dispatcher_.dispatch_callback(items)
+
+ method.assert_called_once_with([items[0]])
+ manager._exactly_once_delivery_enabled.assert_called()
+
+ if method_name != "drop" and method_name != "lease":
+ assert items[1].future.result() == AcknowledgeStatus.SUCCESS
+
+
[email protected](
+ "items,method_name",
+ [
+ (
+ [
+ requests.AckRequest("0", 0, 0, "", None),
+ requests.AckRequest("0", 0, 1, "", futures.Future()),
+ ],
+ "ack",
+ ),
+ (
+ [
+ requests.DropRequest("0", 0, ""),
+ requests.DropRequest("0", 1, ""),
+ ],
+ "drop",
+ ),
+ (
+ [
+ requests.LeaseRequest("0", 0, ""),
+ requests.LeaseRequest("0", 1, ""),
+ ],
+ "lease",
+ ),
+ (
+ [
+ requests.ModAckRequest("0", 0, None),
+ requests.ModAckRequest("0", 1, futures.Future()),
+ ],
+ "modify_ack_deadline",
+ ),
+ (
+ [
+ requests.NackRequest("0", 0, "", None),
+ requests.NackRequest("0", 1, "", futures.Future()),
+ ],
+ "nack",
+ ),
+ ],
+)
+def test_dispatch_duplicate_items_callback_active_manager_with_futures_eod(
+ items, method_name
+):
+ manager = mock.create_autospec(
+ streaming_pull_manager.StreamingPullManager, instance=True
+ )
+ dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)
+
+ manager._exactly_once_delivery_enabled.return_value = True
+ with mock.patch.object(dispatcher_, method_name) as method:
+ dispatcher_.dispatch_callback(items)
+
+ method.assert_called_once_with([items[0]])
+ manager._exactly_once_delivery_enabled.assert_called()
+
+ if method_name != "drop" and method_name != "lease":
+ with pytest.raises(ValueError) as err:
+ items[1].future.result()
+ assert err.errisinstance(ValueError)
+
+
+def test_dispatch_duplicate_items_diff_types_callback_active_manager_with_futures_eod():
+ ack_future = futures.Future()
+ ack_request = requests.AckRequest("0", 0, 1, "", ack_future)
+ drop_request = requests.DropRequest("0", 1, "")
+ lease_request = requests.LeaseRequest("0", 1, "")
+ nack_future = futures.Future()
+ nack_request = requests.NackRequest("0", 1, "", nack_future)
+ modack_future = futures.Future()
+ modack_request = requests.ModAckRequest("0", 1, modack_future)
+
+ items = [ack_request, drop_request, lease_request, nack_request, modack_request]
+
+ manager = mock.create_autospec(
+ streaming_pull_manager.StreamingPullManager, instance=True
+ )
+ dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)
+
+ manager._exactly_once_delivery_enabled.return_value = True
+ with mock.patch.multiple(
+ dispatcher_,
+ ack=mock.DEFAULT,
+ nack=mock.DEFAULT,
+ drop=mock.DEFAULT,
+ lease=mock.DEFAULT,
+ modify_ack_deadline=mock.DEFAULT,
+ ):
+ dispatcher_.dispatch_callback(items)
+ manager._exactly_once_delivery_enabled.assert_called()
+ dispatcher_.ack.assert_called_once_with([ack_request])
+ dispatcher_.drop.assert_called_once_with([drop_request])
+ dispatcher_.lease.assert_called_once_with([lease_request])
+ dispatcher_.nack.assert_called_once_with([nack_request])
+ dispatcher_.modify_ack_deadline.assert_called_once_with([modack_request])
+
+
[email protected](
+ "items,method_name",
+ [
+ (
+ [
+ requests.AckRequest("0", 0, 0, "", None),
+ requests.AckRequest("0", 0, 1, "", None),
+ ],
+ "ack",
+ ),
+ (
+ [
+ requests.DropRequest("0", 0, ""),
+ requests.DropRequest("0", 1, ""),
+ ],
+ "drop",
+ ),
+ (
+ [
+ requests.LeaseRequest("0", 0, ""),
+ requests.LeaseRequest("0", 1, ""),
+ ],
+ "lease",
+ ),
+ (
+ [
+ requests.ModAckRequest("0", 0, None),
+ requests.ModAckRequest("0", 1, None),
+ ],
+ "modify_ack_deadline",
+ ),
+ (
+ [
+ requests.NackRequest("0", 0, "", None),
+ requests.NackRequest("0", 1, "", None),
+ ],
+ "nack",
+ ),
+ ],
+)
+def test_dispatch_duplicate_items_callback_active_manager_no_futures_eod(
+ items, method_name
+):
+ manager = mock.create_autospec(
+ streaming_pull_manager.StreamingPullManager, instance=True
+ )
+ dispatcher_ = dispatcher.Dispatcher(manager, mock.sentinel.queue)
+
+ manager._exactly_once_delivery_enabled.return_value = True
+ with mock.patch.object(dispatcher_, method_name) as method:
+ dispatcher_.dispatch_callback(items)
+
+ method.assert_called_once_with([items[0]])
+ manager._exactly_once_delivery_enabled.assert_called()
def test_unknown_request_type():
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
google-api-core==2.24.2
google-auth==2.38.0
-e git+https://github.com/googleapis/python-pubsub.git@1a07d7ce3b3580191f74b7895dd1b8afb13baccb#egg=google_cloud_pubsub
googleapis-common-protos==1.69.2
grpc-google-iam-v1==0.12.7
grpcio==1.71.0
grpcio-status==1.63.0rc1
idna==3.10
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pluggy==1.5.0
proto-plus==1.26.1
protobuf==4.25.6
pyasn1==0.6.1
pyasn1_modules==0.4.2
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
requests==2.32.3
rsa==4.9
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: python-pubsub
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- google-api-core==2.24.2
- google-auth==2.38.0
- googleapis-common-protos==1.69.2
- grpc-google-iam-v1==0.12.7
- grpcio==1.71.0
- grpcio-status==1.63.0rc1
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- proto-plus==1.26.1
- protobuf==4.25.6
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- requests==2.32.3
- rsa==4.9
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/python-pubsub
| [
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_active_manager[item0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_active_manager[item1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_active_manager[item2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_active_manager[item3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_active_manager[item4-nack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_inactive_manager[item0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_inactive_manager[item1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_inactive_manager[item2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_inactive_manager[item3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_callback_inactive_manager[item4-nack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures[items0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures[items1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures[items2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures[items3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures[items4-nack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod[items0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod[items1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod[items2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod[items3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_no_eod[items4-nack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_eod[items0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_eod[items1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_eod[items2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_eod[items3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_with_futures_eod[items4-nack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_diff_types_callback_active_manager_with_futures_eod",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures_eod[items0-ack]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures_eod[items1-drop]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures_eod[items2-lease]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures_eod[items3-modify_ack_deadline]",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_dispatch_duplicate_items_callback_active_manager_no_futures_eod[items4-nack]"
] | [] | [
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_unknown_request_type",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_ack",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_ack_no_time",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_ack_splitting_large_payload",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_retry_acks_in_new_thread",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_retry_acks",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_retry_modacks_in_new_thread",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_retry_modacks",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_lease",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_drop_unordered_messages",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_drop_ordered_messages",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_nack",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_modify_ack_deadline",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_modify_ack_deadline_splitting_large_payload",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_start",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_start_already_started",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_stop",
"tests/unit/pubsub_v1/subscriber/test_dispatcher.py::test_stop_no_join"
] | [] | Apache License 2.0 | 12,664 | 920 | [
"google/cloud/pubsub_v1/subscriber/_protocol/dispatcher.py"
] |
|
miurahr__aqtinstall-518 | 320df539c0cd050686022e004805282bd86e7760 | 2022-04-20 01:31:23 | 068302bca58f92eaab815dc672bebeb7e02b8ae3 | diff --git a/aqt/exceptions.py b/aqt/exceptions.py
index f02f032..d56fd4f 100644
--- a/aqt/exceptions.py
+++ b/aqt/exceptions.py
@@ -18,14 +18,14 @@
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-from typing import Iterable
+from typing import List
DOCS_CONFIG = "https://aqtinstall.readthedocs.io/en/stable/configuration.html#configuration"
class AqtException(Exception):
def __init__(self, *args, **kwargs):
- self.suggested_action: Iterable[str] = kwargs.pop("suggested_action", [])
+ self.suggested_action: List[str] = kwargs.pop("suggested_action", [])
self.should_show_help: bool = kwargs.pop("should_show_help", False)
super(AqtException, self).__init__(*args, **kwargs)
@@ -40,6 +40,9 @@ class AqtException(Exception):
["* " + suggestion for suggestion in self.suggested_action]
)
+ def append_suggested_follow_up(self, suggestions: List[str]):
+ self.suggested_action.extend(suggestions)
+
class ArchiveDownloadError(AqtException):
pass
diff --git a/aqt/metadata.py b/aqt/metadata.py
index 8c95cfc..7be73e6 100644
--- a/aqt/metadata.py
+++ b/aqt/metadata.py
@@ -450,7 +450,7 @@ class MetadataFactory:
return arches
def fetch_extensions(self, version: Version) -> List[str]:
- versions_extensions = MetadataFactory.get_versions_extensions(
+ versions_extensions = self.get_versions_extensions(
self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category
)
filtered = filter(
@@ -467,7 +467,7 @@ class MetadataFactory:
def get_version(ver_ext: Tuple[Version, str]):
return ver_ext[0]
- versions_extensions = MetadataFactory.get_versions_extensions(
+ versions_extensions = self.get_versions_extensions(
self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category
)
versions = sorted(filter(None, map(get_version, filter(filter_by, versions_extensions))))
@@ -479,7 +479,7 @@ class MetadataFactory:
def fetch_tools(self) -> List[str]:
html_doc = self.fetch_http(self.archive_id.to_url(), False)
- return list(MetadataFactory.iterate_folders(html_doc, "tools"))
+ return list(self.iterate_folders(html_doc, "tools"))
def fetch_tool_modules(self, tool_name: str) -> List[str]:
tool_data = self._fetch_module_metadata(tool_name)
@@ -588,24 +588,32 @@ class MetadataFactory:
f"Connection to '{base_url}' failed. Retrying with fallback '{base_urls[i + 1]}'."
)
- @staticmethod
- def iterate_folders(html_doc: str, filter_category: str = "") -> Generator[str, None, None]:
+ def iterate_folders(self, html_doc: str, filter_category: str = "") -> Generator[str, None, None]:
def table_row_to_folder(tr: bs4.element.Tag) -> str:
try:
return tr.find_all("td")[1].a.contents[0].rstrip("/")
except (AttributeError, IndexError):
return ""
- soup: bs4.BeautifulSoup = bs4.BeautifulSoup(html_doc, "html.parser")
- for row in soup.body.table.find_all("tr"):
- content: str = table_row_to_folder(row)
- if not content or content == "Parent Directory":
- continue
- if content.startswith(filter_category):
- yield content
-
- @staticmethod
- def get_versions_extensions(html_doc: str, category: str) -> Iterator[Tuple[Optional[Version], str]]:
+ try:
+ soup: bs4.BeautifulSoup = bs4.BeautifulSoup(html_doc, "html.parser")
+ for row in soup.body.table.find_all("tr"):
+ content: str = table_row_to_folder(row)
+ if not content or content == "Parent Directory":
+ continue
+ if content.startswith(filter_category):
+ yield content
+ except Exception as e:
+ url = posixpath.join(Settings.baseurl, self.archive_id.to_url())
+ raise ArchiveConnectionError(
+ f"Failed to retrieve the expected HTML page at {url}",
+ suggested_action=[
+ "Check your network connection.",
+ f"Make sure that you can access {url} in your web browser.",
+ ],
+ ) from e
+
+ def get_versions_extensions(self, html_doc: str, category: str) -> Iterator[Tuple[Optional[Version], str]]:
def folder_to_version_extension(folder: str) -> Tuple[Optional[Version], str]:
components = folder.split("_", maxsplit=2)
ext = "" if len(components) < 3 else components[2]
@@ -617,7 +625,7 @@ class MetadataFactory:
return map(
folder_to_version_extension,
- MetadataFactory.iterate_folders(html_doc, category),
+ self.iterate_folders(html_doc, category),
)
@staticmethod
@@ -792,5 +800,5 @@ def show_list(meta: MetadataFactory):
else:
print(*output, sep=" ")
except (ArchiveDownloadError, ArchiveConnectionError) as e:
- e.suggested_action = suggested_follow_up(meta)
+ e.append_suggested_follow_up(suggested_follow_up(meta))
raise e from e
| `MetadataFactory` makes inappropriate assumptions about the HTML files it retrieves.
**Describe the bug**
`MetadataFactory` improperly assumes that HTML pages at download.qt.io will always contain a `<body>` element with a `<table>` inside it. Occasionally the server returns something unexpected, and `aqt` should provide a reasonable error message instead of crashing.
**To Reproduce**
Run `aqt list-qt linux desktop`, under the condition that the URL https://download.qt.io/online/qtsdkrepository/linux_x64/desktop/ returns a page without a table.
**Expected behavior**
The `aqt list-qt linux desktop` command returns a list of Qt versions.
**`aqt` output**
```
'NoneType' object has no attribute 'find_all'
Traceback (most recent call last):
File "/home/garald/.local/lib/python3.8/site-packages/aqt/installer.py", line 108, in run
args.func(args)
File "/home/garald/.local/lib/python3.8/site-packages/aqt/installer.py", line 524, in run_list_qt
show_list(meta)
File "/home/garald/.local/lib/python3.8/site-packages/aqt/metadata.py", line 775, in show_list
output = meta.getList()
File "/home/garald/.local/lib/python3.8/site-packages/aqt/metadata.py", line 435, in getList
return self._action()
File "/home/garald/.local/lib/python3.8/site-packages/aqt/metadata.py", line 473, in fetch_versions
versions = sorted(filter(None, map(get_version, filter(filter_by, versions_extensions))))
File "/home/garald/.local/lib/python3.8/site-packages/aqt/metadata.py", line 600, in iterate_folders
for row in soup.body.table.find_all("tr"):
AttributeError: 'NoneType' object has no attribute 'find_all'
aqtinstall(aqt) v2.1.0 on Python 3.8.10 [CPython GCC 9.4.0]
Working dir: /home/garald
Arguments: ['/home/garald/.local/bin/aqt', 'list-qt', 'linux', 'desktop']
Host: uname_result(system='Linux', node='garald-H310M-S2-2-0', release='5.4.0-109-generic', version='#123-Ubuntu SMP Fri Apr 8 09:10:54 UTC 2022', machine='x86_64', processor='x86_64')
===========================PLEASE FILE A BUG REPORT===========================
You have discovered a bug in aqt.
Please file a bug report at https://github.com/miurahr/aqtinstall/issues.
Please remember to include a copy of this program's output in your report.
```
**Desktop (please complete the following information):**
- OS: Linux
- aqtinstall(aqt) v2.1.0 on Python 3.8.10 [CPython GCC 9.4.0]
**Additional context**
This report comes from an off-topic comment posted in #496, but it is certainly a bug that needs to be addressed. I do not have all the context for this bug, but there's enough information in the program output to understand what happened.
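For illustration, here is a minimal, hedged sketch of the kind of defensive parsing that avoids this failure mode: treat a directory-listing page without the expected `<table>` as a connection-level problem and raise a descriptive error instead of letting BeautifulSoup fail with `AttributeError`. This is not aqt's actual code; the function name, error type, and message are simplified stand-ins, and only `bs4` is assumed to be available.

```python
# Sketch only: defensive parsing of a download.qt.io-style directory listing.
# Not aqt's real implementation; ConnectionError is a stand-in error type.
import bs4


def iterate_folders(html_doc: str, filter_category: str = ""):
    soup = bs4.BeautifulSoup(html_doc, "html.parser")
    # A captive portal or error page may have no <body> or no <table> at all.
    table = soup.body.table if soup.body is not None else None
    if table is None:
        raise ConnectionError(
            "Failed to retrieve the expected HTML page: no folder table found. "
            "Check your network connection and that the URL opens in a browser."
        )
    for row in table.find_all("tr"):
        cells = row.find_all("td")
        if len(cells) < 2 or cells[1].a is None:
            continue  # header rows or malformed rows
        content = cells[1].a.get_text().rstrip("/")
        if content and content != "Parent Directory" and content.startswith(filter_category):
            yield content
```

Raising a dedicated, user-facing error at this point is what allows the caller to attach follow-up suggestions (check the network, open the URL in a browser) rather than surfacing `'NoneType' object has no attribute 'find_all'`.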
| miurahr/aqtinstall | diff --git a/tests/test_list.py b/tests/test_list.py
index 57e308e..de92e96 100644
--- a/tests/test_list.py
+++ b/tests/test_list.py
@@ -1,6 +1,7 @@
import hashlib
import json
import os
+import posixpath
import re
import shutil
import sys
@@ -178,6 +179,31 @@ def test_list_versions_tools(monkeypatch, spec_regex, os_name, target, in_file,
assert f"{all_ver_for_spec}" == row
[email protected](
+ "html_doc",
+ (
+ "<html><body>Login to my public WIFI network:<form>...</form></body></html>",
+ "<html>malformed-html/",
+ ),
+)
+def test_list_bad_html(monkeypatch, html_doc: str):
+ monkeypatch.setattr(MetadataFactory, "fetch_http", lambda *args, **kwargs: html_doc)
+ archive_id = ArchiveId("qt", "linux", "desktop")
+ expected_url = posixpath.join(Settings.baseurl, archive_id.to_url())
+ expected_exception = ArchiveConnectionError(
+ f"Failed to retrieve the expected HTML page at {expected_url}",
+ suggested_action=[
+ "Check your network connection.",
+ f"Make sure that you can access {expected_url} in your web browser.",
+ ],
+ )
+
+ with pytest.raises(ArchiveConnectionError) as e:
+ MetadataFactory(archive_id).fetch_versions()
+ assert e.type == ArchiveConnectionError
+ assert format(e.value) == format(expected_exception)
+
+
@pytest.mark.parametrize(
"version,extension,in_file,expect_out_file",
[
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-socket"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/miurahr/aqtinstall.git@320df539c0cd050686022e004805282bd86e7760#egg=aqtinstall
beautifulsoup4==4.13.3
Brotli==1.1.0
bs4==0.0.2
certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
defusedxml==0.7.1
exceptiongroup==1.2.2
idna==3.10
inflate64==1.0.1
iniconfig==2.1.0
multivolumefile==0.2.3
packaging==24.2
patch==1.16
pluggy==1.5.0
psutil==7.0.0
py7zr==0.22.0
pybcj==1.0.3
pycryptodomex==3.22.0
pyppmd==1.1.1
pytest==8.3.5
pytest-cov==6.0.0
pytest-socket==0.7.0
pyzstd==0.16.2
requests==2.32.3
semantic-version==2.10.0
soupsieve==2.6
texttable==1.7.0
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: aqtinstall
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aqtinstall==2.1.1.dev5
- beautifulsoup4==4.13.3
- brotli==1.1.0
- bs4==0.0.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- defusedxml==0.7.1
- exceptiongroup==1.2.2
- idna==3.10
- inflate64==1.0.1
- iniconfig==2.1.0
- multivolumefile==0.2.3
- packaging==24.2
- patch==1.16
- pluggy==1.5.0
- psutil==7.0.0
- py7zr==0.22.0
- pybcj==1.0.3
- pycryptodomex==3.22.0
- pyppmd==1.1.1
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-socket==0.7.0
- pyzstd==0.16.2
- requests==2.32.3
- semantic-version==2.10.0
- soupsieve==2.6
- texttable==1.7.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/aqtinstall
| [
"tests/test_list.py::test_list_bad_html[<html><body>Login",
"tests/test_list.py::test_list_bad_html[<html>malformed-html/]"
] | [] | [
"tests/test_list.py::test_list_extension_for_arch[wasm_32-version0-wasm]",
"tests/test_list.py::test_list_extension_for_arch[mingw-version1-]",
"tests/test_list.py::test_list_extension_for_arch[android_fake-version2-]",
"tests/test_list.py::test_list_extension_for_arch[android_x86-version3-]",
"tests/test_list.py::test_list_extension_for_arch[android_x86-version4-x86]",
"tests/test_list.py::test_list_extension_for_arch[android_x86-version5-x86]",
"tests/test_list.py::test_list_possible_extension_for_arch[wasm_32-expect0]",
"tests/test_list.py::test_list_possible_extension_for_arch[mingw-expect1]",
"tests/test_list.py::test_list_possible_extension_for_arch[android_fake-expect2]",
"tests/test_list.py::test_list_possible_extension_for_arch[android-expect3]",
"tests/test_list.py::test_list_possible_extension_for_arch[android_x86-expect4]",
"tests/test_list.py::test_versions[init_data0-[[Version('1.1.1'),",
"tests/test_list.py::test_versions[init_data1-[]--expect_flat1-None-False]",
"tests/test_list.py::test_versions[init_data2-[[Version('1.2.3')]]-1.2.3-expect_flat2-expect_last2-True]",
"tests/test_list.py::test_list_versions_tools[windows-android-windows-android.html-windows-android-expect.json]",
"tests/test_list.py::test_list_versions_tools[windows-desktop-windows-desktop.html-windows-desktop-expect.json]",
"tests/test_list.py::test_list_versions_tools[windows-winrt-windows-winrt.html-windows-winrt-expect.json]",
"tests/test_list.py::test_list_versions_tools[linux-android-linux-android.html-linux-android-expect.json]",
"tests/test_list.py::test_list_versions_tools[linux-desktop-linux-desktop.html-linux-desktop-expect.json]",
"tests/test_list.py::test_list_versions_tools[mac-android-mac-android.html-mac-android-expect.json]",
"tests/test_list.py::test_list_versions_tools[mac-desktop-mac-desktop.html-mac-desktop-expect.json]",
"tests/test_list.py::test_list_versions_tools[mac-ios-mac-ios.html-mac-ios-expect.json]",
"tests/test_list.py::test_list_architectures_and_modules[5.14.0--windows-5140-update.xml-windows-5140-expect.json]",
"tests/test_list.py::test_list_architectures_and_modules[5.15.0--windows-5150-update.xml-windows-5150-expect.json]",
"tests/test_list.py::test_list_architectures_and_modules[5.15.2-src_doc_examples-windows-5152-src-doc-example-update.xml-windows-5152-src-doc-example-expect.json]",
"tests/test_list.py::test_list_architectures_and_modules[6.2.0--windows-620-update.xml-windows-620-expect.json]",
"tests/test_list.py::test_list_src_doc_examples_archives[src-windows-5.15.2-expected0]",
"tests/test_list.py::test_list_src_doc_examples_archives[doc-windows-5.15.2-expected1]",
"tests/test_list.py::test_list_src_doc_examples_archives[examples-windows-5.15.2-expected2]",
"tests/test_list.py::test_list_src_doc_examples_modules[doc-windows-5.15.2-expected0]",
"tests/test_list.py::test_list_src_doc_examples_modules[examples-windows-5.15.2-expected1]",
"tests/test_list.py::test_list_src_doc_examples_cli[list-src",
"tests/test_list.py::test_list_src_doc_examples_cli[list-doc",
"tests/test_list.py::test_list_src_doc_examples_cli[list-example",
"tests/test_list.py::test_list_archives[5.14.0-win32_mingw73-modules_to_query0-modules_failed_query0]",
"tests/test_list.py::test_list_archives[5.14.0-win32_mingw73-modules_to_query1-modules_failed_query1]",
"tests/test_list.py::test_list_archives[5.14.0-win32_mingw73-modules_to_query2-modules_failed_query2]",
"tests/test_list.py::test_list_archives[5.14.0-win32_mingw73-modules_to_query3-modules_failed_query3]",
"tests/test_list.py::test_list_archives[5.14.0-win64_msvc2017_64-modules_to_query4-modules_failed_query4]",
"tests/test_list.py::test_list_archives[5.14.0-win64_msvc2017_64-modules_to_query5-modules_failed_query5]",
"tests/test_list.py::test_list_archives_insufficient_args",
"tests/test_list.py::test_list_archives_bad_xml",
"tests/test_list.py::test_tool_modules[mac-desktop-tools_cmake]",
"tests/test_list.py::test_tool_modules[mac-desktop-tools_ifw]",
"tests/test_list.py::test_tool_modules[mac-desktop-tools_qtcreator]",
"tests/test_list.py::test_list_qt_cli[--extensions",
"tests/test_list.py::test_list_qt_cli[--spec",
"tests/test_list.py::test_list_qt_cli[--modules",
"tests/test_list.py::test_list_qt_cli[--extension",
"tests/test_list.py::test_list_qt_cli[--arch",
"tests/test_list.py::test_list_targets[list-qt-windows-expect0]",
"tests/test_list.py::test_list_targets[list-qt-linux-expect1]",
"tests/test_list.py::test_list_targets[list-qt-mac-expect2]",
"tests/test_list.py::test_list_targets[list-tool-windows-expect3]",
"tests/test_list.py::test_list_targets[list-tool-linux-expect4]",
"tests/test_list.py::test_list_targets[list-tool-mac-expect5]",
"tests/test_list.py::test_list_wrong_target[list-qt-windows-ios]",
"tests/test_list.py::test_list_wrong_target[list-qt-linux-ios]",
"tests/test_list.py::test_list_wrong_target[list-qt-linux-winrt]",
"tests/test_list.py::test_list_wrong_target[list-qt-mac-winrt]",
"tests/test_list.py::test_list_wrong_target[list-tool-windows-ios]",
"tests/test_list.py::test_list_wrong_target[list-tool-linux-ios]",
"tests/test_list.py::test_list_wrong_target[list-tool-linux-winrt]",
"tests/test_list.py::test_list_wrong_target[list-tool-mac-winrt]",
"tests/test_list.py::test_invalid_spec[list-qt-not",
"tests/test_list.py::test_invalid_spec[list-qt-1...3]",
"tests/test_list.py::test_invalid_spec[list-qt-]",
"tests/test_list.py::test_invalid_spec[list-qt->3",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec0-mytool.999]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec1-mytool.999]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec2-mytool.355]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec3-mytool.300]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec4-mytool.355]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec5-mytool.350]",
"tests/test_list.py::test_list_choose_tool_by_version[simple_spec6-None]",
"tests/test_list.py::test_list_invalid_extensions[android--6.2.0-Qt",
"tests/test_list.py::test_list_invalid_extensions[android-arm64_v8a-5.13.0-The",
"tests/test_list.py::test_list_invalid_extensions[desktop-arm64_v8a-5.13.0-The",
"tests/test_list.py::test_list_invalid_extensions[desktop-arm64_v8a-6.2.0-The",
"tests/test_list.py::test_list_invalid_extensions[desktop-wasm-5.12.11-The",
"tests/test_list.py::test_list_invalid_extensions[desktop-wasm-6.1.9-The",
"tests/test_list.py::test_list_invalid_extensions[android-wasm-5.12.11-The",
"tests/test_list.py::test_list_invalid_extensions[android-wasm-5.14.0-The",
"tests/test_list.py::test_list_invalid_extensions[android-wasm-6.1.9-Qt",
"tests/test_list.py::test_suggested_follow_up[meta0-expected_message0]",
"tests/test_list.py::test_suggested_follow_up[meta1-expected_message1]",
"tests/test_list.py::test_suggested_follow_up[meta2-expected_message2]",
"tests/test_list.py::test_suggested_follow_up[meta3-expected_message3]",
"tests/test_list.py::test_suggested_follow_up[meta4-expected_message4]",
"tests/test_list.py::test_suggested_follow_up[meta5-expected_message5]",
"tests/test_list.py::test_suggested_follow_up[meta6-expected_message6]",
"tests/test_list.py::test_suggested_follow_up[meta7-expected_message7]",
"tests/test_list.py::test_suggested_follow_up[meta8-expected_message8]",
"tests/test_list.py::test_suggested_follow_up[meta9-expected_message9]",
"tests/test_list.py::test_suggested_follow_up[meta10-expected_message10]",
"tests/test_list.py::test_suggested_follow_up[meta11-expected_message11]",
"tests/test_list.py::test_suggested_follow_up[meta12-expected_message12]",
"tests/test_list.py::test_suggested_follow_up[meta13-expected_message13]",
"tests/test_list.py::test_format_suggested_follow_up",
"tests/test_list.py::test_format_suggested_follow_up_empty",
"tests/test_list.py::test_list_describe_filters[meta0-qt/mac/desktop",
"tests/test_list.py::test_list_describe_filters[meta1-qt/mac/desktop/wasm",
"tests/test_list.py::test_list_describe_filters[meta2-qt/mac/desktop]",
"tests/test_list.py::test_list_describe_filters[meta3-qt/mac/desktop/wasm]",
"tests/test_list.py::test_list_to_version[archive_id0-None-5.12.42-expect0]",
"tests/test_list.py::test_list_to_version[archive_id1-None-not",
"tests/test_list.py::test_list_to_version[archive_id2-spec2-latest-expect2]",
"tests/test_list.py::test_list_to_version[archive_id3-spec3-latest-expect3]",
"tests/test_list.py::test_list_fetch_tool_by_simple_spec",
"tests/test_list.py::test_show_list_tools_long_ifw[120-Tool",
"tests/test_list.py::test_show_list_tools_long_ifw[80-Tool",
"tests/test_list.py::test_show_list_tools_long_ifw[0-Tool",
"tests/test_list.py::test_show_list_versions",
"tests/test_list.py::test_show_list_tools",
"tests/test_list.py::test_show_list_empty",
"tests/test_list.py::test_show_list_bad_connection",
"tests/test_list.py::test_list_tool_cli[mac-desktop-tools_cmake]",
"tests/test_list.py::test_fetch_http_ok",
"tests/test_list.py::test_fetch_http_failover",
"tests/test_list.py::test_fetch_http_download_error[ArchiveDownloadError]",
"tests/test_list.py::test_fetch_http_download_error[ArchiveConnectionError]"
] | [] | MIT License | 12,665 | 1,292 | [
"aqt/exceptions.py",
"aqt/metadata.py"
] |
|
frictionlessdata__frictionless-py-1039 | 1a11c3e762c6b4a982dbf5ce043e15112ef05ef1 | 2022-04-20 12:54:33 | 930ae7e8d31ac3d1540e482badd03ab6e19aab56 | diff --git a/frictionless/metadata.py b/frictionless/metadata.py
index cce9ff08..419d13b0 100644
--- a/frictionless/metadata.py
+++ b/frictionless/metadata.py
@@ -11,6 +11,7 @@ from importlib import import_module
from .exception import FrictionlessException
from .helpers import cached_property
from . import helpers
+import pprint as pp
# NOTE:
@@ -83,6 +84,10 @@ class Metadata(helpers.ControlledDict):
if value is not None:
dict.__setitem__(self, key, value)
+ def __repr__(self) -> str:
+ """Returns string representation for metadata."""
+ return pp.pformat(self.to_dict())
+
# Expand
def expand(self):
| Ability to pretty-print metadata in sessions / notebooks
# Overview
Currently, many users reach for `pprint.pprint`, but we could provide a built-in method for this (or implement `__repr__`, etc.). The exact approach needs to be thought through.
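A minimal sketch of the idea, assuming a dict-backed descriptor class; the `Descriptor` name below is a toy stand-in, not frictionless's actual `Metadata` class:

```python
# Sketch: delegate __repr__ to pprint so descriptors print nicely in
# interactive sessions and notebooks without an explicit pprint.pprint call.
import pprint


class Descriptor(dict):
    def __repr__(self) -> str:
        # Convert to a plain dict so pprint formats it normally instead of
        # recursing back into this overridden __repr__.
        return pprint.pformat(dict(self))


schema = Descriptor(
    {"fields": [{"name": "id", "type": "integer"}, {"name": "name", "type": "string"}]}
)
print(schema)  # long descriptors wrap across lines with sorted keys
```

Wiring this into the shared `Metadata` base class, rather than each subclass, would give `Package`, `Resource`, `Schema`, `Field`, and friends the same behaviour for free.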
| frictionlessdata/frictionless-py | diff --git a/tests/test_field.py b/tests/test_field.py
index a6d7977b..c13f282b 100644
--- a/tests/test_field.py
+++ b/tests/test_field.py
@@ -357,3 +357,12 @@ def test_field_set_schema():
def test_field_set_type():
field = Field(type="int")
assert field.type == "int"
+
+
+# Issues
+
+
+def test_field_pprint_1029():
+ field = Field({"name": "name", "type": "string", "constraints": {"maxLength": 2}})
+ expected = """{'constraints': {'maxLength': 2}, 'name': 'name', 'type': 'string'}"""
+ assert repr(field) == expected
diff --git a/tests/test_inquiry.py b/tests/test_inquiry.py
index c2010a62..0fcb34e2 100644
--- a/tests/test_inquiry.py
+++ b/tests/test_inquiry.py
@@ -19,3 +19,20 @@ def test_inquiry_with_task_class():
)
report = inquiry.run()
assert report.valid
+
+
+# Issues
+
+
+def test_inquiry_pprint_1029():
+ inquiry = Inquiry(
+ {
+ "tasks": [
+ {"source": "data/capital-valid.csv"},
+ {"source": "data/capital-invalid.csv"},
+ ]
+ }
+ )
+ expected = """{'tasks': [{'source': 'data/capital-valid.csv'},
+ {'source': 'data/capital-invalid.csv'}]}"""
+ assert repr(inquiry) == expected
diff --git a/tests/test_metadata.py b/tests/test_metadata.py
index bf17bda6..e362940a 100644
--- a/tests/test_metadata.py
+++ b/tests/test_metadata.py
@@ -1,6 +1,5 @@
from frictionless import Metadata
-
# General
@@ -12,3 +11,27 @@ def test_descriptor():
def test_descriptor_from_path():
metadata = Metadata("data/schema-valid.json")
assert metadata["primaryKey"] == "id"
+
+
+# Issues
+
+
+def test_metadata_pprint_1029():
+ metadata = Metadata("data/schema-valid.json")
+ expected = """{'fields': [{'constraints': {'required': True},
+ 'description': 'The id.',
+ 'name': 'id',
+ 'title': 'ID',
+ 'type': 'integer'},
+ {'constraints': {'required': True},
+ 'description': 'The name.',
+ 'name': 'name',
+ 'title': 'Name',
+ 'type': 'string'},
+ {'constraints': {'required': True},
+ 'description': 'The age.',
+ 'name': 'age',
+ 'title': 'Age',
+ 'type': 'integer'}],
+ 'primaryKey': 'id'}"""
+ assert repr(metadata) == expected
diff --git a/tests/test_package.py b/tests/test_package.py
index 59d71342..d55a2b68 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -1099,3 +1099,11 @@ def test_package_set_trusted():
assert package.trusted is True
package.trusted = False
assert package.trusted is False
+
+
+def test_package_pprint_1029():
+ data = [["id", "name"], ["1", "english"], ["2", "中国人"]]
+ package = Package({"resources": [{"name": "name", "data": data}]})
+ expected = """{'resources': [{'data': [['id', 'name'], ['1', 'english'], ['2', '中国人']],
+ 'name': 'name'}]}"""
+ assert repr(package) == expected
diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py
index 30b51af5..3c1a0ed4 100644
--- a/tests/test_pipeline.py
+++ b/tests/test_pipeline.py
@@ -49,3 +49,28 @@ def test_pipeline_package():
)
status = pipeline.run()
assert status.task.target.resource_names == ["data"]
+
+
+# Issues
+
+
+def test_pipeline_pprint_1029():
+ pipeline = Pipeline(
+ {
+ "tasks": [
+ {
+ "type": "resource",
+ "source": {"path": "../data/transform.csv"},
+ "steps": [
+ {"code": "table-normalize"},
+ {"code": "table-melt", "fieldName": "name"},
+ ],
+ }
+ ]
+ }
+ )
+ expected = """{'tasks': [{'source': {'path': '../data/transform.csv'},
+ 'steps': [{'code': 'table-normalize'},
+ {'code': 'table-melt', 'fieldName': 'name'}],
+ 'type': 'resource'}]}"""
+ assert repr(pipeline) == expected
diff --git a/tests/test_report.py b/tests/test_report.py
index 493a7db0..f88f7d3e 100644
--- a/tests/test_report.py
+++ b/tests/test_report.py
@@ -100,3 +100,25 @@ def test_report_to_yaml_with_bytes_serialization_issue_836():
report = validate(source)
descriptor = report.to_yaml()
assert "binary" not in descriptor
+
+
+# Issues
+
+
+def test_report_pprint_1029():
+ report = validate(
+ "data/capital-invalid.csv", pick_errors=["duplicate-label"], time=None
+ )
+ expected = """{'errors': [{'code': 'task-error',
+ 'description': 'General task-level error.',
+ 'message': 'The task has an error: __init__() got an unexpected '
+ "keyword argument 'time'",
+ 'name': 'Task Error',
+ 'note': "__init__() got an unexpected keyword argument 'time'",
+ 'tags': []}],
+ 'stats': {'errors': 1, 'tasks': 0},
+ 'tasks': [],
+ 'time': 0.0,
+ 'valid': False,
+ 'version': '4.29.0'}"""
+ assert repr(report) == expected
diff --git a/tests/test_resource.py b/tests/test_resource.py
index da61276b..6d91653d 100644
--- a/tests/test_resource.py
+++ b/tests/test_resource.py
@@ -2748,3 +2748,17 @@ def test_resource_set_package():
test_package_2 = Package()
resource.package = test_package_2
assert resource.package == test_package_2
+
+
+def test_resource_pprint_1029():
+ resource = Resource(
+ name="resource",
+ title="My Resource",
+ description="My Resource for the Guide",
+ path="data/table.csv",
+ )
+ expected = """{'description': 'My Resource for the Guide',
+ 'name': 'resource',
+ 'path': 'data/table.csv',
+ 'title': 'My Resource'}"""
+ assert repr(resource) == expected
diff --git a/tests/test_schema.py b/tests/test_schema.py
index 9b9e7f09..1e53b25d 100644
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -467,3 +467,18 @@ def test_schema_not_supported_type_issue_goodatbles_304():
schema = Schema({"fields": [{"name": "name"}, {"name": "age", "type": "bad"}]})
assert schema.metadata_valid is False
assert schema.fields[1] == {"name": "age", "type": "bad"}
+
+
+def test_schema_pprint_1029():
+ descriptor = {
+ "fields": [
+ {"name": "test_1", "type": "string", "format": "default"},
+ {"name": "test_2", "type": "string", "format": "default"},
+ {"name": "test_3", "type": "string", "format": "default"},
+ ]
+ }
+ schema = Schema(descriptor)
+ expected = """{'fields': [{'format': 'default', 'name': 'test_1', 'type': 'string'},
+ {'format': 'default', 'name': 'test_2', 'type': 'string'},
+ {'format': 'default', 'name': 'test_3', 'type': 'string'}]}"""
+ assert repr(schema) == expected
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 4.29 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install --upgrade -e .[bigquery,ckan,excel,gsheets,html,json,ods,pandas,s3,server,spss,sql,dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-vcr",
"pytest-only",
"pytest-timeout"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc postgresql libpq-dev"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
attrs==25.3.0
black==23.12.1
blinker==1.9.0
boto3==1.37.23
botocore==1.37.23
cached-property==2.0.1
cachetools==5.5.2
certifi==2025.1.31
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.1
ckanapi==4.8
click==8.1.8
coverage==7.8.0
cryptography==44.0.2
cssselect==1.3.0
databind==4.5.2
databind.core==4.5.2
databind.json==4.5.2
decorator==5.2.1
deepmerge==2.0
Deprecated==1.2.18
docopt==0.6.2
docspec==2.2.1
docspec-python==2.2.1
docstring_parser==0.11
et_xmlfile==2.0.0
exceptiongroup==1.2.2
executing==2.2.0
ezodf==0.3.2
Flask==3.1.0
-e git+https://github.com/frictionlessdata/frictionless-py.git@1a11c3e762c6b4a982dbf5ce043e15112ef05ef1#egg=frictionless
gitdb==4.0.12
GitPython==3.1.44
giturlparse==0.12.0
google-api-core==2.24.2
google-api-python-client==2.166.0
google-auth==2.38.0
google-auth-httplib2==0.2.0
google-auth-oauthlib==1.2.1
googleapis-common-protos==1.69.2
greenlet==3.1.1
gunicorn==23.0.0
httplib2==0.22.0
idna==3.10
ijson==3.3.0
importlib_metadata==8.6.1
iniconfig==2.1.0
ipython==8.18.1
isodate==0.7.2
itsdangerous==2.2.0
jedi==0.19.2
Jinja2==3.1.6
jmespath==1.0.1
jsonlines==4.0.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
livemark==0.110.8
livereload==2.7.1
lxml==5.3.1
markdown-it-py==3.0.0
marko==1.3.1
MarkupSafe==3.0.2
matplotlib-inline==0.1.7
mccabe==0.7.0
mdurl==0.1.2
moto==5.1.2
multidict==6.2.0
mypy==1.15.0
mypy-extensions==1.0.0
nr-date==2.1.0
nr-stream==1.1.5
nr.util==0.8.12
numpy==2.0.2
oauth2client==4.1.3
oauthlib==3.2.2
openpyxl==3.1.5
packaging==24.2
pandas==2.2.3
parso==0.8.4
pathspec==0.12.1
petl==1.7.15
pexpect==4.9.0
platformdirs==4.3.7
pluggy==1.5.0
prompt_toolkit==3.0.50
propcache==0.3.1
proto-plus==1.26.1
protobuf==6.30.2
psycopg2==2.9.10
ptyprocess==0.7.0
pure_eval==0.2.3
pyasn1==0.6.1
pyasn1_modules==0.4.2
pycodestyle==2.13.0
pycparser==2.22
pydoc-markdown==4.8.2
pydocstyle==6.3.0
pyflakes==3.3.1
Pygments==2.19.1
pygsheets==2.0.6
pylama==8.4.1
PyMySQL==1.1.1
pyparsing==3.2.3
pyquery==1.4.3
pytest==8.3.5
pytest-cov==6.0.0
pytest-only==2.1.2
pytest-timeout==2.3.1
pytest-vcr==1.0.2
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-slugify==8.0.4
pytz==2025.2
PyYAML==6.0.2
referencing==0.36.2
requests==2.32.3
requests-mock==1.12.1
requests-oauthlib==2.0.0
responses==0.25.7
rfc3986==2.0.0
rich==14.0.0
rpds-py==0.24.0
rsa==4.9
s3transfer==0.11.4
savReaderWriter==3.4.2
shellingham==1.5.4
simpleeval==1.0.3
simplejson==3.20.1
six==1.17.0
smmap==5.0.2
snowballstemmer==2.2.0
SQLAlchemy==2.0.40
stack-data==0.6.3
stringcase==1.2.0
text-unidecode==1.3
tomli==2.2.1
tomli_w==1.2.0
tornado==6.4.2
traitlets==5.14.3
typeapi==2.2.4
typer==0.15.2
typing_extensions==4.13.0
tzdata==2025.2
uritemplate==4.1.1
urllib3==1.26.20
validators==0.34.0
vcrpy==7.0.0
watchdog==6.0.0
wcwidth==0.2.13
Werkzeug==3.1.3
wrapt==1.17.2
xlrd==2.0.1
xlwt==1.3.0
xmltodict==0.14.2
yapf==0.43.0
yarl==1.18.3
zipp==3.21.0
| name: frictionless-py
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- attrs==25.3.0
- black==23.12.1
- blinker==1.9.0
- boto3==1.37.23
- botocore==1.37.23
- cached-property==2.0.1
- cachetools==5.5.2
- certifi==2025.1.31
- cffi==1.17.1
- chardet==5.2.0
- charset-normalizer==3.4.1
- ckanapi==4.8
- click==8.1.8
- coverage==7.8.0
- cryptography==44.0.2
- cssselect==1.3.0
- databind==4.5.2
- databind-core==4.5.2
- databind-json==4.5.2
- decorator==5.2.1
- deepmerge==2.0
- deprecated==1.2.18
- docopt==0.6.2
- docspec==2.2.1
- docspec-python==2.2.1
- docstring-parser==0.11
- et-xmlfile==2.0.0
- exceptiongroup==1.2.2
- executing==2.2.0
- ezodf==0.3.2
- flask==3.1.0
- frictionless==4.29.0
- gitdb==4.0.12
- gitpython==3.1.44
- giturlparse==0.12.0
- google-api-core==2.24.2
- google-api-python-client==2.166.0
- google-auth==2.38.0
- google-auth-httplib2==0.2.0
- google-auth-oauthlib==1.2.1
- googleapis-common-protos==1.69.2
- greenlet==3.1.1
- gunicorn==23.0.0
- httplib2==0.22.0
- idna==3.10
- ijson==3.3.0
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- ipython==8.18.1
- isodate==0.7.2
- itsdangerous==2.2.0
- jedi==0.19.2
- jinja2==3.1.6
- jmespath==1.0.1
- jsonlines==4.0.0
- jsonschema==4.23.0
- jsonschema-specifications==2024.10.1
- livemark==0.110.8
- livereload==2.7.1
- lxml==5.3.1
- markdown-it-py==3.0.0
- marko==1.3.1
- markupsafe==3.0.2
- matplotlib-inline==0.1.7
- mccabe==0.7.0
- mdurl==0.1.2
- moto==5.1.2
- multidict==6.2.0
- mypy==1.15.0
- mypy-extensions==1.0.0
- nr-date==2.1.0
- nr-stream==1.1.5
- nr-util==0.8.12
- numpy==2.0.2
- oauth2client==4.1.3
- oauthlib==3.2.2
- openpyxl==3.1.5
- packaging==24.2
- pandas==2.2.3
- parso==0.8.4
- pathspec==0.12.1
- petl==1.7.15
- pexpect==4.9.0
- platformdirs==4.3.7
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- propcache==0.3.1
- proto-plus==1.26.1
- protobuf==6.30.2
- psycopg2==2.9.10
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- pycodestyle==2.13.0
- pycparser==2.22
- pydoc-markdown==4.8.2
- pydocstyle==6.3.0
- pyflakes==3.3.1
- pygments==2.19.1
- pygsheets==2.0.6
- pylama==8.4.1
- pymysql==1.1.1
- pyparsing==3.2.3
- pyquery==1.4.3
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-only==2.1.2
- pytest-timeout==2.3.1
- pytest-vcr==1.0.2
- python-dateutil==2.9.0.post0
- python-dotenv==1.1.0
- python-slugify==8.0.4
- pytz==2025.2
- pyyaml==6.0.2
- referencing==0.36.2
- requests==2.32.3
- requests-mock==1.12.1
- requests-oauthlib==2.0.0
- responses==0.25.7
- rfc3986==2.0.0
- rich==14.0.0
- rpds-py==0.24.0
- rsa==4.9
- s3transfer==0.11.4
- savreaderwriter==3.4.2
- shellingham==1.5.4
- simpleeval==1.0.3
- simplejson==3.20.1
- six==1.17.0
- smmap==5.0.2
- snowballstemmer==2.2.0
- sqlalchemy==2.0.40
- stack-data==0.6.3
- stringcase==1.2.0
- text-unidecode==1.3
- tomli==2.2.1
- tomli-w==1.2.0
- tornado==6.4.2
- traitlets==5.14.3
- typeapi==2.2.4
- typer==0.15.2
- typing-extensions==4.13.0
- tzdata==2025.2
- uritemplate==4.1.1
- urllib3==1.26.20
- validators==0.34.0
- vcrpy==7.0.0
- watchdog==6.0.0
- wcwidth==0.2.13
- werkzeug==3.1.3
- wrapt==1.17.2
- xlrd==2.0.1
- xlwt==1.3.0
- xmltodict==0.14.2
- yapf==0.43.0
- yarl==1.18.3
- zipp==3.21.0
prefix: /opt/conda/envs/frictionless-py
| [
"tests/test_field.py::test_field_pprint_1029",
"tests/test_inquiry.py::test_inquiry_pprint_1029",
"tests/test_metadata.py::test_metadata_pprint_1029",
"tests/test_package.py::test_package_pprint_1029",
"tests/test_pipeline.py::test_pipeline_pprint_1029",
"tests/test_report.py::test_report_pprint_1029",
"tests/test_resource.py::test_resource_pprint_1029",
"tests/test_schema.py::test_schema_pprint_1029"
] | [
"tests/test_package.py::test_package_external_profile_invalid_local",
"tests/test_package.py::test_package_external_profile_invalid_local_from_descriptor",
"tests/test_package.py::test_package_external_profile_invalid_local_from_descriptor_unsafe_trusted",
"tests/test_package.py::test_package_external_profile_invalid_remote",
"tests/test_package.py::test_package_external_profile_invalid_remote_from_descriptor",
"tests/test_resource.py::test_resource_wrong_encoding_detection_issue_265"
] | [
"tests/test_field.py::test_field",
"tests/test_field.py::test_field_defaults",
"tests/test_field.py::test_field_read_cell",
"tests/test_field.py::test_field_read_cell_string_missing_values",
"tests/test_field.py::test_field_read_cell_number_missingValues",
"tests/test_field.py::test_field_standard_specs_properties[create_descriptor0]",
"tests/test_field.py::test_field_standard_specs_properties[create_descriptor1]",
"tests/test_field.py::test_field_description_html",
"tests/test_field.py::test_field_description_html_multiline",
"tests/test_field.py::test_field_description_html_not_set",
"tests/test_field.py::test_field_description_text",
"tests/test_field.py::test_field_description_text_plain",
"tests/test_field.py::test_field_constraint_field_type[constraints0-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints1-array-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints2-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints3-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints4-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints5-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints6-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints7-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints8-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints9-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints10-object-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints11-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints12-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints13-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints14-yearmonth-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints15-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints16-array-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints17-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints18-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints19-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints20-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints21-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints22-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints23-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints24-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints25-object-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints26-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints27-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints28-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints29-yearmonth-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints30-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints31-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints32-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints33-date-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints34-datetime-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints35-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints36-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints37-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints38-integer-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints39-number-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints40-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints41-string-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints42-time-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints43-year-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints44-yearmonth-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints45-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints46-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints47-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints48-date-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints49-datetime-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints50-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints51-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints52-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints53-integer-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints54-number-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints55-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints56-string-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints57-time-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints58-year-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints59-yearmonth-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints60-any-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints61-array-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints62-boolean-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints63-date-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints64-datetime-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints65-duration-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints66-geojson-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints67-geopoint-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints68-integer-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints69-number-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints70-object-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints71-string-True]",
"tests/test_field.py::test_field_constraint_field_type[constraints72-time-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints73-year-False]",
"tests/test_field.py::test_field_constraint_field_type[constraints74-yearmonth-False]",
"tests/test_field.py::test_field_read_cell_required",
"tests/test_field.py::test_field_read_cell_minLength",
"tests/test_field.py::test_field_read_cell_maxLength",
"tests/test_field.py::test_field_read_cell_minimum",
"tests/test_field.py::test_field_read_cell_maximum",
"tests/test_field.py::test_field_read_cell_pattern",
"tests/test_field.py::test_field_read_cell_enum",
"tests/test_field.py::test_field_read_cell_multiple_constraints",
"tests/test_field.py::test_field_with_example_set[None]",
"tests/test_field.py::test_field_with_example_set[42]",
"tests/test_field.py::test_field_with_example_set[foo]",
"tests/test_field.py::test_field_to_copy",
"tests/test_field.py::test_field_set_schema",
"tests/test_field.py::test_field_set_type",
"tests/test_inquiry.py::test_inquiry",
"tests/test_inquiry.py::test_inquiry_with_task_class",
"tests/test_metadata.py::test_descriptor",
"tests/test_metadata.py::test_descriptor_from_path",
"tests/test_package.py::test_package",
"tests/test_package.py::test_package_from_dict",
"tests/test_package.py::test_package_from_mapping",
"tests/test_package.py::test_package_from_path",
"tests/test_package.py::test_package_from_pathlib",
"tests/test_package.py::test_package_from_path_error_bad_path",
"tests/test_package.py::test_package_from_path_error_non_json",
"tests/test_package.py::test_package_from_path_error_bad_json",
"tests/test_package.py::test_package_from_path_error_bad_json_not_dict",
"tests/test_package.py::test_package_from_path_remote",
"tests/test_package.py::test_package_from_path_remote_error_not_found",
"tests/test_package.py::test_package_from_path_remote_error_bad_json",
"tests/test_package.py::test_package_from_path_remote_error_bad_json_not_dict",
"tests/test_package.py::test_package_from_invalid_descriptor_type",
"tests/test_package.py::test_package_from_zip",
"tests/test_package.py::test_package_from_zip_remote",
"tests/test_package.py::test_package_from_zip_no_descriptor",
"tests/test_package.py::test_package_from_zip_innerpath",
"tests/test_package.py::test_package_standard_specs_properties[create_descriptor0]",
"tests/test_package.py::test_package_standard_specs_properties[create_descriptor1]",
"tests/test_package.py::test_package_description_html",
"tests/test_package.py::test_package_description_html_multiline",
"tests/test_package.py::test_package_description_text",
"tests/test_package.py::test_package_description_text_plain",
"tests/test_package.py::test_package_resources",
"tests/test_package.py::test_package_resources_inline",
"tests/test_package.py::test_package_resources_empty",
"tests/test_package.py::test_package_add_resource",
"tests/test_package.py::test_package_get_resource",
"tests/test_package.py::test_package_get_resource_error_not_found",
"tests/test_package.py::test_package_remove_resource",
"tests/test_package.py::test_package_remove_resource_error_not_found",
"tests/test_package.py::test_package_update_resource",
"tests/test_package.py::test_package_resources_append_in_place",
"tests/test_package.py::test_package_resources_remove_in_place",
"tests/test_package.py::test_package_resources_respect_layout_set_after_creation_issue_503",
"tests/test_package.py::test_package_compression_implicit_gz",
"tests/test_package.py::test_package_compression_implicit_zip",
"tests/test_package.py::test_package_compression_explicit_gz",
"tests/test_package.py::test_package_compression_explicit_zip",
"tests/test_package.py::test_package_schema_foreign_key",
"tests/test_package.py::test_package_schema_foreign_key_invalid",
"tests/test_package.py::test_package_schema_foreign_key_self_reference",
"tests/test_package.py::test_package_schema_foreign_key_self_reference_invalid",
"tests/test_package.py::test_package_schema_foreign_key_multifield",
"tests/test_package.py::test_package_schema_foreign_key_multifield_invalid",
"tests/test_package.py::test_resource_onerror",
"tests/test_package.py::test_resource_onerror_header_warn",
"tests/test_package.py::test_resource_onerror_header_raise",
"tests/test_package.py::test_resource_onerror_row_warn",
"tests/test_package.py::test_resource_onerror_row_raise",
"tests/test_package.py::test_package_expand",
"tests/test_package.py::test_package_expand_empty",
"tests/test_package.py::test_package_expand_resource_schema",
"tests/test_package.py::test_package_expand_resource_dialect",
"tests/test_package.py::test_package_infer",
"tests/test_package.py::test_package_infer_with_basepath",
"tests/test_package.py::test_package_infer_multiple_paths",
"tests/test_package.py::test_package_infer_non_utf8_file",
"tests/test_package.py::test_package_infer_empty_file",
"tests/test_package.py::test_package_infer_duplicate_resource_names_issue_530",
"tests/test_package.py::test_package_to_copy",
"tests/test_package.py::test_package_to_json",
"tests/test_package.py::test_package_to_yaml",
"tests/test_package.py::test_package_to_zip",
"tests/test_package.py::test_package_to_zip_resource_path",
"tests/test_package.py::test_package_to_zip_resource_remote_path",
"tests/test_package.py::test_package_to_zip_resource_memory_inline",
"tests/test_package.py::test_package_to_zip_resource_memory_function",
"tests/test_package.py::test_package_external_profile",
"tests/test_package.py::test_package_external_profile_invalid_local_from_descriptor_unsafe",
"tests/test_package.py::test_package_dialect_no_header_issue_167",
"tests/test_package.py::test_package_validation_is_not_strict_enough_issue_869",
"tests/test_package.py::test_package_validation_duplicate_resource_names_issue_942",
"tests/test_package.py::test_package_set_hashing",
"tests/test_package.py::test_package_set_base_path",
"tests/test_package.py::test_package_set_onerror",
"tests/test_package.py::test_package_set_trusted",
"tests/test_pipeline.py::test_pipeline_resource",
"tests/test_pipeline.py::test_pipeline_package",
"tests/test_report.py::test_report",
"tests/test_report.py::test_report_expand",
"tests/test_report.py::test_report_to_json_with_bytes_serialization_issue_836",
"tests/test_report.py::test_report_to_yaml_with_bytes_serialization_issue_836",
"tests/test_resource.py::test_resource",
"tests/test_resource.py::test_resource_from_dict",
"tests/test_resource.py::test_resource_from_path_json",
"tests/test_resource.py::test_resource_from_path_yaml",
"tests/test_resource.py::test_resource_from_path_yml_issue_644",
"tests/test_resource.py::test_resource_from_path_error_bad_path",
"tests/test_resource.py::test_resource_from_path_remote",
"tests/test_resource.py::test_resource_from_path_remote_error_bad_path",
"tests/test_resource.py::test_resource_source_non_tabular",
"tests/test_resource.py::test_resource_source_non_tabular_remote",
"tests/test_resource.py::test_resource_source_non_tabular_error_bad_path",
"tests/test_resource.py::test_resource_source_path",
"tests/test_resource.py::test_resource_source_path_and_basepath",
"tests/test_resource.py::test_resource_source_path_and_basepath_remote",
"tests/test_resource.py::test_resource_source_path_remote_and_basepath_remote",
"tests/test_resource.py::test_resource_source_path_error_bad_path",
"tests/test_resource.py::test_resource_source_path_error_bad_path_not_safe_absolute",
"tests/test_resource.py::test_resource_source_path_error_bad_path_not_safe_traversing",
"tests/test_resource.py::test_resource_source_data",
"tests/test_resource.py::test_resource_source_path_and_data",
"tests/test_resource.py::test_resource_source_no_path_and_no_data",
"tests/test_resource.py::test_resource_standard_specs_properties[create_descriptor0]",
"tests/test_resource.py::test_resource_standard_specs_properties[create_descriptor1]",
"tests/test_resource.py::test_resource_official_hash_bytes_rows",
"tests/test_resource.py::test_resource_official_hash_bytes_rows_with_hashing_algorithm",
"tests/test_resource.py::test_resource_description_html",
"tests/test_resource.py::test_resource_description_html_multiline",
"tests/test_resource.py::test_resource_description_html_not_set",
"tests/test_resource.py::test_resource_description_text",
"tests/test_resource.py::test_resource_description_text_plain",
"tests/test_resource.py::test_resource_scheme_file",
"tests/test_resource.py::test_resource_scheme_https",
"tests/test_resource.py::test_resource_scheme_stream",
"tests/test_resource.py::test_resource_scheme_buffer",
"tests/test_resource.py::test_resource_scheme_error_bad_scheme",
"tests/test_resource.py::test_resource_scheme_error_bad_scheme_and_format",
"tests/test_resource.py::test_resource_scheme_error_file_not_found",
"tests/test_resource.py::test_resource_scheme_error_file_not_found_remote",
"tests/test_resource.py::test_resource_scheme_error_file_not_found_bad_format",
"tests/test_resource.py::test_resource_scheme_error_file_not_found_bad_compression",
"tests/test_resource.py::test_resource_format_csv",
"tests/test_resource.py::test_resource_format_ndjson",
"tests/test_resource.py::test_resource_format_tsv",
"tests/test_resource.py::test_resource_format_xls",
"tests/test_resource.py::test_resource_format_xlsx",
"tests/test_resource.py::test_resource_format_error_non_matching_format",
"tests/test_resource.py::test_resource_hashing",
"tests/test_resource.py::test_resource_hashing_provided",
"tests/test_resource.py::test_resource_hashing_error_bad_hashing",
"tests/test_resource.py::test_resource_encoding",
"tests/test_resource.py::test_resource_encoding_explicit_utf8",
"tests/test_resource.py::test_resource_encoding_explicit_latin1",
"tests/test_resource.py::test_resource_encoding_utf_16",
"tests/test_resource.py::test_resource_encoding_error_bad_encoding",
"tests/test_resource.py::test_resource_encoding_error_non_matching_encoding",
"tests/test_resource.py::test_resource_innerpath_local_csv_zip",
"tests/test_resource.py::test_resource_innerpath_local_csv_zip_multiple_files",
"tests/test_resource.py::test_resource_innerpath_local_csv_zip_multiple_files_explicit",
"tests/test_resource.py::test_resource_compression_local_csv_zip",
"tests/test_resource.py::test_resource_compression_local_csv_zip_multiple_files",
"tests/test_resource.py::test_resource_compression_local_csv_zip_multiple_open",
"tests/test_resource.py::test_resource_compression_local_csv_gz",
"tests/test_resource.py::test_resource_compression_stream_csv_zip",
"tests/test_resource.py::test_resource_compression_stream_csv_gz",
"tests/test_resource.py::test_resource_compression_remote_csv_zip",
"tests/test_resource.py::test_resource_compression_remote_csv_gz",
"tests/test_resource.py::test_resource_compression_error_bad",
"tests/test_resource.py::test_resource_compression_error_invalid_zip",
"tests/test_resource.py::test_resource_compression_error_invalid_gz",
"tests/test_resource.py::test_resource_compression_legacy_no_value_issue_616",
"tests/test_resource.py::test_resource_control",
"tests/test_resource.py::test_resource_control_http_preload",
"tests/test_resource.py::test_resource_control_bad_property",
"tests/test_resource.py::test_resource_dialect",
"tests/test_resource.py::test_resource_dialect_from_path",
"tests/test_resource.py::test_resource_dialect_from_path_remote",
"tests/test_resource.py::test_resource_dialect_from_path_error_path_not_safe",
"tests/test_resource.py::test_resource_dialect_csv_default",
"tests/test_resource.py::test_resource_dialect_csv_delimiter",
"tests/test_resource.py::test_resource_dialect_json_property",
"tests/test_resource.py::test_resource_dialect_bad_property",
"tests/test_resource.py::test_resource_dialect_header_false_official",
"tests/test_resource.py::test_resource_layout_header",
"tests/test_resource.py::test_resource_layout_header_false",
"tests/test_resource.py::test_resource_layout_header_unicode",
"tests/test_resource.py::test_resource_layout_header_stream_context_manager",
"tests/test_resource.py::test_resource_layout_header_inline",
"tests/test_resource.py::test_resource_layout_header_json_keyed",
"tests/test_resource.py::test_resource_layout_header_inline_keyed",
"tests/test_resource.py::test_resource_layout_header_inline_keyed_headers_is_none",
"tests/test_resource.py::test_resource_layout_header_xlsx_multiline",
"tests/test_resource.py::test_resource_layout_header_csv_multiline_headers_join",
"tests/test_resource.py::test_resource_layout_header_csv_multiline_headers_duplicates",
"tests/test_resource.py::test_resource_layout_header_strip_and_non_strings",
"tests/test_resource.py::test_resource_layout_header_case_default",
"tests/test_resource.py::test_resource_layout_header_case_is_false",
"tests/test_resource.py::test_resource_layout_pick_fields",
"tests/test_resource.py::test_resource_layout_pick_fields_position",
"tests/test_resource.py::test_resource_layout_pick_fields_regex",
"tests/test_resource.py::test_resource_layout_pick_fields_position_and_prefix",
"tests/test_resource.py::test_resource_layout_skip_fields",
"tests/test_resource.py::test_resource_layout_skip_fields_position",
"tests/test_resource.py::test_resource_layout_skip_fields_regex",
"tests/test_resource.py::test_resource_layout_skip_fields_position_and_prefix",
"tests/test_resource.py::test_resource_layout_skip_fields_blank_header",
"tests/test_resource.py::test_resource_layout_skip_fields_blank_header_notation",
"tests/test_resource.py::test_resource_layout_skip_fields_keyed_source",
"tests/test_resource.py::test_resource_layout_limit_fields",
"tests/test_resource.py::test_resource_layout_offset_fields",
"tests/test_resource.py::test_resource_layout_limit_offset_fields",
"tests/test_resource.py::test_resource_layout_pick_rows",
"tests/test_resource.py::test_resource_layout_pick_rows_number",
"tests/test_resource.py::test_resource_layout_pick_rows_regex",
"tests/test_resource.py::test_resource_layout_skip_rows",
"tests/test_resource.py::test_resource_layout_skip_rows_excel_empty_column",
"tests/test_resource.py::test_resource_layout_skip_rows_with_headers",
"tests/test_resource.py::test_resource_layout_skip_rows_with_headers_example_from_readme",
"tests/test_resource.py::test_resource_layout_skip_rows_regex",
"tests/test_resource.py::test_resource_layout_skip_rows_preset",
"tests/test_resource.py::test_resource_layout_limit_rows",
"tests/test_resource.py::test_resource_layout_offset_rows",
"tests/test_resource.py::test_resource_layout_limit_offset_rows",
"tests/test_resource.py::test_resource_layout_limit_fields_error_zero_issue_521",
"tests/test_resource.py::test_resource_layout_offset_fields_error_zero_issue_521",
"tests/test_resource.py::test_resource_layout_limit_rows_error_zero_issue_521",
"tests/test_resource.py::test_resource_layout_offset_rows_error_zero_issue_521",
"tests/test_resource.py::test_resource_layout_respect_set_after_creation_issue_503",
"tests/test_resource.py::test_resource_schema",
"tests/test_resource.py::test_resource_schema_source_data",
"tests/test_resource.py::test_resource_schema_source_remote",
"tests/test_resource.py::test_resource_schema_from_path",
"tests/test_resource.py::test_resource_schema_from_path_with_basepath",
"tests/test_resource.py::test_resource_schema_from_path_remote",
"tests/test_resource.py::test_resource_schema_from_path_error_bad_path",
"tests/test_resource.py::test_resource_schema_from_path_error_path_not_safe",
"tests/test_resource.py::test_resource_schema_inferred",
"tests/test_resource.py::test_resource_schema_provided",
"tests/test_resource.py::test_resource_schema_unique",
"tests/test_resource.py::test_resource_schema_unique_error",
"tests/test_resource.py::test_resource_schema_primary_key",
"tests/test_resource.py::test_resource_schema_primary_key_error",
"tests/test_resource.py::test_resource_schema_foreign_keys",
"tests/test_resource.py::test_resource_schema_foreign_keys_invalid",
"tests/test_resource.py::test_resource_stats_hash",
"tests/test_resource.py::test_resource_stats_hash_md5",
"tests/test_resource.py::test_resource_stats_hash_sha1",
"tests/test_resource.py::test_resource_stats_hash_sha256",
"tests/test_resource.py::test_resource_stats_hash_sha512",
"tests/test_resource.py::test_resource_stats_hash_compressed",
"tests/test_resource.py::test_resource_stats_hash_remote",
"tests/test_resource.py::test_resource_stats_bytes",
"tests/test_resource.py::test_resource_stats_bytes_compressed",
"tests/test_resource.py::test_resource_stats_bytes_remote",
"tests/test_resource.py::test_resource_stats_fields",
"tests/test_resource.py::test_resource_stats_fields_remote",
"tests/test_resource.py::test_resource_stats_rows",
"tests/test_resource.py::test_resource_stats_rows_remote",
"tests/test_resource.py::test_resource_stats_rows_significant",
"tests/test_resource.py::test_resource_detector_field_type",
"tests/test_resource.py::test_resource_detector_field_names",
"tests/test_resource.py::test_resource_detector_field_float_numbers",
"tests/test_resource.py::test_resource_detector_field_type_with_open",
"tests/test_resource.py::test_resource_detector_field_names_with_open",
"tests/test_resource.py::test_resource_detector_schema_sync",
"tests/test_resource.py::test_resource_detector_schema_sync_with_infer",
"tests/test_resource.py::test_resource_detector_schema_patch",
"tests/test_resource.py::test_resource_detector_schema_patch_missing_values",
"tests/test_resource.py::test_resource_detector_schema_patch_with_infer",
"tests/test_resource.py::test_resource_onerror",
"tests/test_resource.py::test_resource_onerror_header_warn",
"tests/test_resource.py::test_resource_onerror_header_raise",
"tests/test_resource.py::test_resource_onerror_row_warn",
"tests/test_resource.py::test_resource_onerror_row_raise",
"tests/test_resource.py::test_resource_expand",
"tests/test_resource.py::test_resource_expand_with_dialect",
"tests/test_resource.py::test_resource_expand_with_schema",
"tests/test_resource.py::test_resource_infer",
"tests/test_resource.py::test_resource_infer_source_non_tabular",
"tests/test_resource.py::test_resource_infer_from_path",
"tests/test_resource.py::test_resource_infer_not_slugified_name_issue_531",
"tests/test_resource.py::test_resource_open",
"tests/test_resource.py::test_resource_open_read_rows",
"tests/test_resource.py::test_resource_open_row_stream",
"tests/test_resource.py::test_resource_open_row_stream_iterate",
"tests/test_resource.py::test_resource_open_row_stream_error_cells",
"tests/test_resource.py::test_resource_open_row_stream_blank_cells",
"tests/test_resource.py::test_resource_open_read_lists",
"tests/test_resource.py::test_resource_open_list_stream",
"tests/test_resource.py::test_resource_open_list_stream_iterate",
"tests/test_resource.py::test_resource_open_empty",
"tests/test_resource.py::test_resource_open_without_rows",
"tests/test_resource.py::test_resource_open_without_headers",
"tests/test_resource.py::test_resource_open_source_error_data",
"tests/test_resource.py::test_resource_reopen",
"tests/test_resource.py::test_resource_reopen_and_detector_sample_size",
"tests/test_resource.py::test_resource_reopen_generator",
"tests/test_resource.py::test_resource_read_bytes",
"tests/test_resource.py::test_resource_read_text",
"tests/test_resource.py::test_resource_read_data",
"tests/test_resource.py::test_resource_read_lists",
"tests/test_resource.py::test_resource_read_rows",
"tests/test_resource.py::test_resource_write",
"tests/test_resource.py::test_resource_write_to_path",
"tests/test_resource.py::test_resource_write_format_error_bad_format",
"tests/test_resource.py::test_resource_to_copy",
"tests/test_resource.py::test_resource_to_json",
"tests/test_resource.py::test_resource_to_yaml",
"tests/test_resource.py::test_to_json_with_resource_data_is_not_a_list_issue_693",
"tests/test_resource.py::test_to_yaml_with_resource_data_is_not_a_list_issue_693",
"tests/test_resource.py::test_to_yaml_allow_unicode_issue_844",
"tests/test_resource.py::test_resource_to_view",
"tests/test_resource.py::test_resource_metadata_bad_schema_format",
"tests/test_resource.py::test_resource_reset_on_close_issue_190",
"tests/test_resource.py::test_resource_skip_blank_at_the_end_issue_bco_dmo_33",
"tests/test_resource.py::test_resource_not_existent_local_file_with_no_format_issue_287",
"tests/test_resource.py::test_resource_not_existent_remote_file_with_no_format_issue_287",
"tests/test_resource.py::test_resource_chardet_raises_remote_issue_305",
"tests/test_resource.py::test_resource_skip_rows_non_string_cell_issue_320",
"tests/test_resource.py::test_resource_skip_rows_non_string_cell_issue_322",
"tests/test_resource.py::test_resource_relative_parent_path_with_trusted_option_issue_171",
"tests/test_resource.py::test_resource_preserve_format_from_descriptor_on_infer_issue_188",
"tests/test_resource.py::test_resource_set_base_path",
"tests/test_resource.py::test_resource_set_detector",
"tests/test_resource.py::test_resource_set_onerror",
"tests/test_resource.py::test_resource_set_trusted",
"tests/test_resource.py::test_resource_set_package",
"tests/test_schema.py::test_schema",
"tests/test_schema.py::test_schema_extract_metadata_error",
"tests/test_schema.py::test_schema_metadata_invalid",
"tests/test_schema.py::test_schema_descriptor",
"tests/test_schema.py::test_schema_descriptor_path",
"tests/test_schema.py::test_schema_descriptor_url",
"tests/test_schema.py::test_schema_read_cells",
"tests/test_schema.py::test_schema_read_cells_null_values",
"tests/test_schema.py::test_schema_read_cells_too_short",
"tests/test_schema.py::test_schema_read_cells_too_long",
"tests/test_schema.py::test_schema_read_cells_wrong_type",
"tests/test_schema.py::test_schema_missing_values",
"tests/test_schema.py::test_schema_fields",
"tests/test_schema.py::test_schema_get_field",
"tests/test_schema.py::test_schema_get_field_error_not_found",
"tests/test_schema.py::test_schema_update_field",
"tests/test_schema.py::test_schema_has_field",
"tests/test_schema.py::test_schema_remove_field",
"tests/test_schema.py::test_schema_remove_field_error_not_found",
"tests/test_schema.py::test_schema_field_names",
"tests/test_schema.py::test_schema_primary_key",
"tests/test_schema.py::test_schema_foreign_keys",
"tests/test_schema.py::test_schema_add_then_remove_field",
"tests/test_schema.py::test_schema_primary_foreign_keys_as_array",
"tests/test_schema.py::test_schema_primary_foreign_keys_as_string",
"tests/test_schema.py::test_schema_metadata_valid",
"tests/test_schema.py::test_schema_metadata_not_valid",
"tests/test_schema.py::test_schema_metadata_not_valid_multiple_errors",
"tests/test_schema.py::test_schema_metadata_not_valid_multiple_errors_with_pk",
"tests/test_schema.py::test_schema_metadata_error_message",
"tests/test_schema.py::test_schema_valid_examples",
"tests/test_schema.py::test_schema_invalid_example",
"tests/test_schema.py::test_schema_standard_specs_properties[create_descriptor0]",
"tests/test_schema.py::test_schema_standard_specs_properties[create_descriptor1]",
"tests/test_schema.py::test_schema_descriptor_expand",
"tests/test_schema.py::test_schema_to_copy",
"tests/test_schema.py::test_schema_to_json",
"tests/test_schema.py::test_schema_to_yaml",
"tests/test_schema.py::test_schema_from_jsonschema",
"tests/test_schema.py::test_schema_metadata_bad_schema_format",
"tests/test_schema.py::test_schema_field_date_format_issue_177",
"tests/test_schema.py::test_schema_field_time_format_issue_177",
"tests/test_schema.py::test_schema_add_remove_field_issue_218",
"tests/test_schema.py::test_schema_not_supported_type_issue_goodatbles_304"
] | [] | MIT License | 12,666 | 191 | [
"frictionless/metadata.py"
] |
|
planetlabs__planet-client-python-429 | 6a25692e87c700eb9ba053491a2b54834aa1f74a | 2022-04-20 16:03:12 | 50c73a594f5c28d43670135d451beb5d12a6ba57 | diff --git a/planet/cli/orders.py b/planet/cli/orders.py
index 4caf843..01f677e 100644
--- a/planet/cli/orders.py
+++ b/planet/cli/orders.py
@@ -94,11 +94,15 @@ async def get(ctx, order_id, pretty):
@coro
@click.argument('order_id', type=click.UUID)
async def cancel(ctx, order_id):
- '''Cancel order by order ID.'''
+ '''Cancel order by order ID.
+
+ This command cancels a queued order and outputs the cancelled order
+ details.
+ '''
async with orders_client(ctx) as cl:
- await cl.cancel_order(str(order_id))
+ json_resp = await cl.cancel_order(str(order_id))
- click.echo('Cancelled')
+ click.echo(json_resp)
def split_list_arg(ctx, param, value):
diff --git a/planet/clients/orders.py b/planet/clients/orders.py
index 318a82e..dfa7ffb 100644
--- a/planet/clients/orders.py
+++ b/planet/clients/orders.py
@@ -177,18 +177,14 @@ class OrdersClient():
order = Order(resp.json())
return order
- async def cancel_order(self, order_id: str) -> Response:
+ async def cancel_order(self, order_id: str) -> dict:
'''Cancel a queued order.
- **Note:** According to the API docs, cancel order should return the
- cancelled order details. But testing reveals that an empty response is
- returned upon success.
-
Parameters:
order_id: The ID of the order
Returns:
- Empty response
+ Results of the cancel request
Raises:
planet.exceptions.ClientError: If order_id is not a valid UUID.
@@ -198,8 +194,8 @@ class OrdersClient():
url = f'{self._orders_url()}/{order_id}'
req = self._request(url, method='PUT')
-
- await self._do_request(req)
+ resp = await self._do_request(req)
+ return resp.json()
async def cancel_orders(self, order_ids: typing.List[str] = None) -> dict:
'''Cancel queued orders in bulk.
| Update cancel order to report the details of the cancelled order
Currently, when an order is cancelled, the CLI reports 'Cancelled' and the Python API returns nothing. However, when an order is cancelled, the Orders API [returns the order details](https://developers.planet.com/docs/orders/reference/#operation/cancelOrder). To align with the API, change both of these to return the details of the cancelled order.
Also, update the SDK and CLI documentation accordingly.
CLI documentation:
```
async def cancel(ctx, order_id, pretty):
"""Cancel order.
This command outputs the cancelled order details, optionally
pretty-printed.
"""
```
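For context, a minimal sketch of how the updated SDK call would be used once `cancel_order` returns the cancelled order details (the client construction mirrors the test patch; session setup and the order ID are placeholders, not part of this issue):
```python
from planet.clients.orders import OrdersClient


async def cancel_and_report(session, order_id):
    # Session construction is elided; it depends on the SDK entry point in use.
    cl = OrdersClient(session)
    # After this change, cancel_order returns the cancelled order details
    # as a dict instead of an empty response.
    details = await cl.cancel_order(order_id)
    print(details["state"])  # expected to be "cancelled"
    return details
```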
Blocked by #362 | planetlabs/planet-client-python | diff --git a/tests/integration/test_orders_api.py b/tests/integration/test_orders_api.py
index 03bf270..53f991d 100644
--- a/tests/integration/test_orders_api.py
+++ b/tests/integration/test_orders_api.py
@@ -291,12 +291,12 @@ async def test_cancel_order(oid, order_description, session):
cancel_url = f'{TEST_ORDERS_URL}/{oid}'
order_description['state'] = 'cancelled'
mock_resp = httpx.Response(HTTPStatus.OK, json=order_description)
+ example_resp = mock_resp.json()
respx.put(cancel_url).return_value = mock_resp
- # TODO: the api says cancel order returns the order details but as
- # far as I can test thus far, it returns nothing. follow up on this
cl = OrdersClient(session, base_url=TEST_URL)
- await cl.cancel_order(oid)
+ json_resp = await cl.cancel_order(oid)
+ assert json_resp == example_resp
@pytest.mark.asyncio
diff --git a/tests/integration/test_orders_cli.py b/tests/integration/test_orders_cli.py
index 506c89e..9d22d7a 100644
--- a/tests/integration/test_orders_cli.py
+++ b/tests/integration/test_orders_cli.py
@@ -177,7 +177,7 @@ def test_cli_orders_cancel(invoke, oid, order_description):
result = invoke(['cancel', oid])
assert not result.exception
- assert 'Cancelled\n' == result.output
+ assert str(mock_resp.json()) + '\n' == result.output
@respx.mock
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 1.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
click==8.1.8
coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
flake8==7.2.0
future==1.0.0
h11==0.14.0
httpcore==0.12.3
httpx==0.16.1
idna==3.10
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.11.3
joblib==1.4.2
livereload==2.7.1
lunr==0.5.6
Markdown==3.7
MarkupSafe==1.1.1
mccabe==0.7.0
mkdocs==1.1
mkdocs-autorefs==0.1.1
mkdocs-click==0.4.0
mkdocs-material==7.2.3
mkdocs-material-extensions==1.3.1
mkdocstrings==0.15.0
nltk==3.9.1
numpy==2.0.2
packaging @ file:///croot/packaging_1734472117206/work
-e git+https://github.com/planetlabs/planet-client-python.git@6a25692e87c700eb9ba053491a2b54834aa1f74a#egg=planet
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.1
Pygments==2.19.1
PyJWT==2.10.1
pymdown-extensions==8.2
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.16.0
pytest-cov==6.0.0
pytkdocs==0.11.1
PyYAML==6.0.2
regex==2024.11.6
respx==0.16.3
rfc3986==1.5.0
shapely==2.0.7
six==1.17.0
sniffio==1.3.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
tornado==6.4.2
tqdm==4.67.1
yapf==0.43.0
zipp==3.21.0
| name: planet-client-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- click==8.1.8
- coverage==7.8.0
- flake8==7.2.0
- future==1.0.0
- h11==0.14.0
- httpcore==0.12.3
- httpx==0.16.1
- idna==3.10
- importlib-metadata==8.6.1
- jinja2==2.11.3
- joblib==1.4.2
- livereload==2.7.1
- lunr==0.5.6
- markdown==3.7
- markupsafe==1.1.1
- mccabe==0.7.0
- mkdocs==1.1
- mkdocs-autorefs==0.1.1
- mkdocs-click==0.4.0
- mkdocs-material==7.2.3
- mkdocs-material-extensions==1.3.1
- mkdocstrings==0.15.0
- nltk==3.9.1
- numpy==2.0.2
- platformdirs==4.3.7
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pygments==2.19.1
- pyjwt==2.10.1
- pymdown-extensions==8.2
- pytest-asyncio==0.16.0
- pytest-cov==6.0.0
- pytkdocs==0.11.1
- pyyaml==6.0.2
- regex==2024.11.6
- respx==0.16.3
- rfc3986==1.5.0
- shapely==2.0.7
- six==1.17.0
- sniffio==1.3.1
- tornado==6.4.2
- tqdm==4.67.1
- yapf==0.43.0
- zipp==3.21.0
prefix: /opt/conda/envs/planet-client-python
| [
"tests/integration/test_orders_api.py::test_cancel_order",
"tests/integration/test_orders_cli.py::test_cli_orders_cancel"
] | [] | [
"tests/integration/test_orders_api.py::test_OrderStates_reached",
"tests/integration/test_orders_api.py::test_OrderStates_passed",
"tests/integration/test_orders_api.py::test_list_orders_basic",
"tests/integration/test_orders_api.py::test_list_orders_state",
"tests/integration/test_orders_api.py::test_list_orders_state_invalid_state",
"tests/integration/test_orders_api.py::test_list_orders_limit",
"tests/integration/test_orders_api.py::test_list_orders_asjson",
"tests/integration/test_orders_api.py::test_create_order",
"tests/integration/test_orders_api.py::test_create_order_bad_item_type",
"tests/integration/test_orders_api.py::test_create_order_item_id_does_not_exist",
"tests/integration/test_orders_api.py::test_get_order",
"tests/integration/test_orders_api.py::test_get_order_invalid_id",
"tests/integration/test_orders_api.py::test_get_order_id_doesnt_exist",
"tests/integration/test_orders_api.py::test_cancel_order_invalid_id",
"tests/integration/test_orders_api.py::test_cancel_order_id_doesnt_exist",
"tests/integration/test_orders_api.py::test_cancel_order_id_cannot_be_cancelled",
"tests/integration/test_orders_api.py::test_cancel_orders_by_ids",
"tests/integration/test_orders_api.py::test_cancel_orders_by_ids_invalid_id",
"tests/integration/test_orders_api.py::test_cancel_orders_all",
"tests/integration/test_orders_api.py::test_wait_default",
"tests/integration/test_orders_api.py::test_wait_callback",
"tests/integration/test_orders_api.py::test_wait_state",
"tests/integration/test_orders_api.py::test_wait_max_attempts_enabled",
"tests/integration/test_orders_api.py::test_wait_max_attempts_disabled",
"tests/integration/test_orders_api.py::test_wait_invalid_oid",
"tests/integration/test_orders_api.py::test_wait_invalid_state",
"tests/integration/test_orders_api.py::test_aggegated_order_stats",
"tests/integration/test_orders_api.py::test_download_asset_md",
"tests/integration/test_orders_api.py::test_download_asset_img",
"tests/integration/test_orders_api.py::test_download_order_success",
"tests/integration/test_orders_api.py::test_download_order_state",
"tests/integration/test_orders_api.py::test_download_order_overwrite_true_preexisting_data",
"tests/integration/test_orders_api.py::test_download_order_overwrite_false_preexisting_data",
"tests/integration/test_orders_api.py::test_download_order_overwrite_true_nonexisting_data",
"tests/integration/test_orders_api.py::test_download_order_overwrite_false_nonexisting_data",
"tests/integration/test_orders_cli.py::test_split_list_arg_empty_string",
"tests/integration/test_orders_cli.py::test_split_list_arg_None",
"tests/integration/test_orders_cli.py::test_cli_orders_list_basic",
"tests/integration/test_orders_cli.py::test_cli_orders_list_empty",
"tests/integration/test_orders_cli.py::test_cli_orders_list_state",
"tests/integration/test_orders_cli.py::test_cli_orders_list_limit",
"tests/integration/test_orders_cli.py::test_cli_orders_list_pretty",
"tests/integration/test_orders_cli.py::test_cli_orders_get",
"tests/integration/test_orders_cli.py::test_cli_orders_get_id_not_found",
"tests/integration/test_orders_cli.py::test_cli_orders_cancel_id_not_found",
"tests/integration/test_orders_cli.py::test_cli_orders_wait_default",
"tests/integration/test_orders_cli.py::test_cli_orders_wait_max_attempts",
"tests/integration/test_orders_cli.py::test_cli_orders_wait_quiet",
"tests/integration/test_orders_cli.py::test_cli_orders_download_default",
"tests/integration/test_orders_cli.py::test_cli_orders_download_dest",
"tests/integration/test_orders_cli.py::test_cli_orders_download_overwrite",
"tests/integration/test_orders_cli.py::test_cli_orders_download_quiet",
"tests/integration/test_orders_cli.py::test_cli_orders_download_state",
"tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419-expected_ids0]",
"tests/integration/test_orders_cli.py::test_cli_orders_create_basic_success[4500474_2133707_2021-05-20_2419,4500474_2133707_2021-05-20_2420-expected_ids1]",
"tests/integration/test_orders_cli.py::test_cli_orders_create_basic_item_type_invalid",
"tests/integration/test_orders_cli.py::test_cli_orders_create_id_empty",
"tests/integration/test_orders_cli.py::test_cli_orders_create_clip",
"tests/integration/test_orders_cli.py::test_cli_orders_create_clip_featureclass",
"tests/integration/test_orders_cli.py::test_cli_orders_create_clip_invalid_geometry",
"tests/integration/test_orders_cli.py::test_cli_orders_create_clip_and_tools",
"tests/integration/test_orders_cli.py::test_cli_orders_create_cloudconfig",
"tests/integration/test_orders_cli.py::test_cli_orders_create_email",
"tests/integration/test_orders_cli.py::test_cli_orders_create_tools",
"tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_doesnotexist",
"tests/integration/test_orders_cli.py::test_cli_orders_read_file_json_invalidjson"
] | [] | Apache License 2.0 | 12,668 | 518 | [
"planet/cli/orders.py",
"planet/clients/orders.py"
] |
|
projectmesa__mesa-1287 | f4f44ad1d5fb5d30651cabc8c8557a97c7f737e6 | 2022-04-21 06:31:11 | 991a01753ea1b3b6c956aea5ba7fccd9f2d8270b | codecov[bot]: # [Codecov](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=h1&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa) Report
> Merging [#1287](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa) (fe85f80) into [main](https://codecov.io/gh/projectmesa/mesa/commit/eb73aac0cf7bf129a4ac93187ff76a4fd9e3ae39?el=desc&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa) (eb73aac) will **decrease** coverage by `0.75%`.
> The diff coverage is `25.00%`.
```diff
@@ Coverage Diff @@
## main #1287 +/- ##
==========================================
- Coverage 89.30% 88.54% -0.76%
==========================================
Files 19 19
Lines 1234 1240 +6
Branches 243 243
==========================================
- Hits 1102 1098 -4
- Misses 99 107 +8
- Partials 33 35 +2
```
| [Impacted Files](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa) | Coverage Δ | |
|---|---|---|
| [mesa/model.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS9tb2RlbC5weQ==) | `81.25% <25.00%> (-18.75%)` | :arrow_down: |
| [mesa/visualization/ModularVisualization.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS92aXN1YWxpemF0aW9uL01vZHVsYXJWaXN1YWxpemF0aW9uLnB5) | `74.04% <0.00%> (-1.96%)` | :arrow_down: |
| [mesa/visualization/UserParam.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS92aXN1YWxpemF0aW9uL1VzZXJQYXJhbS5weQ==) | `86.00% <0.00%> (-0.80%)` | :arrow_down: |
| [mesa/main.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS9tYWluLnB5) | `96.00% <0.00%> (-0.43%)` | :arrow_down: |
| [mesa/datacollection.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS9kYXRhY29sbGVjdGlvbi5weQ==) | `97.67% <0.00%> (-0.03%)` | :arrow_down: |
| [mesa/space.py](https://codecov.io/gh/projectmesa/mesa/pull/1287/diff?src=pr&el=tree&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa#diff-bWVzYS9zcGFjZS5weQ==) | `94.45% <0.00%> (-0.02%)` | :arrow_down: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=continue&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=footer&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa). Last update [eb73aac...fe85f80](https://codecov.io/gh/projectmesa/mesa/pull/1287?src=pr&el=lastupdated&utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments?utm_medium=referral&utm_source=github&utm_content=comment&utm_campaign=pr+comments&utm_term=projectmesa).
rht: By trying this PR, I did not mean for it to be merged. I meant for the PR to be installed as is.
jackiekazil: Should we add [WIP] in this case then?
rht: @tpike3 this PR is ready. | diff --git a/mesa/model.py b/mesa/model.py
index fddf68ea..6180a455 100644
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -6,6 +6,8 @@ Core Objects: Model
"""
import random
+from mesa.datacollection import DataCollector
+
# mypy
from typing import Any, Optional
@@ -61,3 +63,22 @@ class Model:
seed = self._seed
self.random.seed(seed)
self._seed = seed
+
+ def initialize_data_collector(
+ self, model_reporters=None, agent_reporters=None, tables=None
+ ) -> None:
+ if not hasattr(self, "schedule") or self.schedule is None:
+ raise RuntimeError(
+ "You must initialize the scheduler (self.schedule) before initializing the data collector."
+ )
+ if self.schedule.get_agent_count() == 0:
+ raise RuntimeError(
+ "You must add agents to the scheduler before initializing the data collector."
+ )
+ self.datacollector = DataCollector(
+ model_reporters=model_reporters,
+ agent_reporters=agent_reporters,
+ tables=tables,
+ )
+ # Collect data for the first time during initialization.
+ self.datacollector.collect(self)
| bug: batch_run assumes model datacollector is called datacollector
**Describe the bug**
batch_run assumes the user will name the model's `DataCollector` attribute `self.datacollector`. This is not stated in the documentation, nor is it necessarily a requirement.
**Expected behavior**
Users should be able to name their model's `DataCollector` attribute whatever they want (e.g. `self.dc`).
**To Reproduce**
Create a model, assign its `DataCollector` to any other attribute name, then execute the model with batch_run and try to retrieve the data.
**Additional context**
Bug identified by students from the University of Mary Washington. They also passed along some very positive comments from their professor:
"They keep saying "wow, that's so easy! That makes it so fun!" Thanks to you and your crew for all you do."
| projectmesa/mesa | diff --git a/tests/test_datacollector.py b/tests/test_datacollector.py
index 0221b7da..0f2bd4c0 100644
--- a/tests/test_datacollector.py
+++ b/tests/test_datacollector.py
@@ -5,7 +5,6 @@ import unittest
from mesa import Model, Agent
from mesa.time import BaseScheduler
-from mesa.datacollection import DataCollector
class MockAgent(Agent):
@@ -47,7 +46,7 @@ class MockModel(Model):
for i in range(10):
a = MockAgent(i, self, val=i)
self.schedule.add(a)
- self.datacollector = DataCollector(
+ self.initialize_data_collector(
{
"total_agents": lambda m: m.schedule.get_agent_count(),
"model_value": "model_val",
@@ -103,10 +102,11 @@ class TestDataCollector(unittest.TestCase):
assert "model_calc" in data_collector.model_vars
assert "model_calc_comp" in data_collector.model_vars
assert "model_calc_fail" in data_collector.model_vars
- assert len(data_collector.model_vars["total_agents"]) == 7
- assert len(data_collector.model_vars["model_value"]) == 7
- assert len(data_collector.model_vars["model_calc"]) == 7
- assert len(data_collector.model_vars["model_calc_comp"]) == 7
+ length = 8
+ assert len(data_collector.model_vars["total_agents"]) == length
+ assert len(data_collector.model_vars["model_value"]) == length
+ assert len(data_collector.model_vars["model_calc"]) == length
+ assert len(data_collector.model_vars["model_calc_comp"]) == length
self.step_assertion(data_collector.model_vars["total_agents"])
for element in data_collector.model_vars["model_value"]:
assert element == 100
@@ -123,7 +123,7 @@ class TestDataCollector(unittest.TestCase):
data_collector = self.model.datacollector
agent_table = data_collector.get_agent_vars_dataframe()
- assert len(data_collector._agent_records) == 7
+ assert len(data_collector._agent_records) == 8
for step, records in data_collector._agent_records.items():
if step < 5:
assert len(records) == 10
@@ -165,13 +165,35 @@ class TestDataCollector(unittest.TestCase):
model_vars = data_collector.get_model_vars_dataframe()
agent_vars = data_collector.get_agent_vars_dataframe()
table_df = data_collector.get_table_dataframe("Final_Values")
- assert model_vars.shape == (7, 5)
- assert agent_vars.shape == (67, 2)
+ assert model_vars.shape == (8, 5)
+ assert agent_vars.shape == (77, 2)
assert table_df.shape == (9, 2)
with self.assertRaises(Exception):
table_df = data_collector.get_table_dataframe("not a real table")
+class TestDataCollectorInitialization(unittest.TestCase):
+ def setUp(self):
+ self.model = Model()
+
+ def test_initialize_before_scheduler(self):
+ with self.assertRaises(RuntimeError) as cm:
+ self.model.initialize_data_collector()
+ self.assertEqual(
+ str(cm.exception),
+ "You must initialize the scheduler (self.schedule) before initializing the data collector.",
+ )
+
+ def test_initialize_before_agents_added_to_scheduler(self):
+ with self.assertRaises(RuntimeError) as cm:
+ self.model.schedule = BaseScheduler(self)
+ self.model.initialize_data_collector()
+ self.assertEqual(
+ str(cm.exception),
+ "You must add agents to the scheduler before initializing the data collector.",
+ )
+
+
if __name__ == "__main__":
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "Pipfile",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
arrow==1.3.0
babel==2.17.0
binaryornot==0.4.4
black==25.1.0
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
click==8.1.8
cookiecutter==2.6.0
coverage==7.8.0
docutils==0.21.2
exceptiongroup==1.2.2
flake8==7.2.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig==2.1.0
Jinja2==3.1.6
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mccabe==0.7.0
mdurl==0.1.2
-e git+https://github.com/projectmesa/mesa.git@f4f44ad1d5fb5d30651cabc8c8557a97c7f737e6#egg=Mesa
mypy-extensions==1.0.0
networkx==3.2.1
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pathspec==0.12.1
pipfile==0.0.2
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
Pygments==2.19.1
pytest==8.3.5
pytest-cov==6.0.0
python-dateutil==2.9.0.post0
python-slugify==8.0.4
pytz==2025.2
PyYAML==6.0.2
requests==2.32.3
rich==14.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
text-unidecode==1.3
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==2.2.1
tornado==6.4.2
tqdm==4.67.1
types-python-dateutil==2.9.0.20241206
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
zipp==3.21.0
| name: mesa
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- pipfile=0.0.2=py_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- arrow==1.3.0
- babel==2.17.0
- binaryornot==0.4.4
- black==25.1.0
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- click==8.1.8
- cookiecutter==2.6.0
- coverage==7.8.0
- docutils==0.21.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markdown-it-py==3.0.0
- markupsafe==3.0.2
- mccabe==0.7.0
- mdurl==0.1.2
- mypy-extensions==1.0.0
- networkx==3.2.1
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pygments==2.19.1
- pytest==8.3.5
- pytest-cov==6.0.0
- python-dateutil==2.9.0.post0
- python-slugify==8.0.4
- pytz==2025.2
- pyyaml==6.0.2
- requests==2.32.3
- rich==14.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- text-unidecode==1.3
- tomli==2.2.1
- tornado==6.4.2
- tqdm==4.67.1
- types-python-dateutil==2.9.0.20241206
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/mesa
| [
"tests/test_datacollector.py::TestDataCollector::test_agent_records",
"tests/test_datacollector.py::TestDataCollector::test_exports",
"tests/test_datacollector.py::TestDataCollector::test_model_vars",
"tests/test_datacollector.py::TestDataCollector::test_table_rows",
"tests/test_datacollector.py::TestDataCollectorInitialization::test_initialize_before_agents_added_to_scheduler",
"tests/test_datacollector.py::TestDataCollectorInitialization::test_initialize_before_scheduler"
] | [] | [] | [] | Apache License 2.0 | 12,673 | 295 | [
"mesa/model.py"
] |
microsoft__debugpy-918 | 7be59933b1df69d22e838f99e54fcdb5afc8cff8 | 2022-04-21 19:17:16 | 5723ed6b1a058145d727ae5e4b065bd65072f35b | sonarcloud[bot]: Kudos, SonarCloud Quality Gate passed! [](https://sonarcloud.io/dashboard?id=microsoft_debugpy&pullRequest=918)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=BUG) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=BUG) [0 Bugs](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=BUG)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=VULNERABILITY) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=VULNERABILITY) [0 Vulnerabilities](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=VULNERABILITY)
[](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=918&resolved=false&types=SECURITY_HOTSPOT) [](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=918&resolved=false&types=SECURITY_HOTSPOT) [0 Security Hotspots](https://sonarcloud.io/project/security_hotspots?id=microsoft_debugpy&pullRequest=918&resolved=false&types=SECURITY_HOTSPOT)
[](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=CODE_SMELL) [](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=CODE_SMELL) [0 Code Smells](https://sonarcloud.io/project/issues?id=microsoft_debugpy&pullRequest=918&resolved=false&types=CODE_SMELL)
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=918&metric=coverage&view=list) No Coverage information
[](https://sonarcloud.io/component_measures?id=microsoft_debugpy&pullRequest=918&metric=duplicated_lines_density&view=list) No Duplication information
| diff --git a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
index f214fa7c..1ed89636 100644
--- a/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
+++ b/src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py
@@ -377,13 +377,17 @@ def _update_globals_and_locals(updated_globals, initial_globals, frame):
# Still, the approach to have a single namespace was chosen because it was the only
# one that enabled creating and using variables during the same evaluation.
assert updated_globals is not None
- changed = False
+ f_locals = None
for key, val in updated_globals.items():
if initial_globals.get(key) is not val:
- changed = True
- frame.f_locals[key] = val
+ if f_locals is None:
+ # Note: we call f_locals only once because each time
+ # we call it the values may be reset.
+ f_locals = frame.f_locals
- if changed:
+ f_locals[key] = val
+
+ if f_locals is not None:
pydevd_save_locals.save_locals(frame)
| Some variables not assigned when using debug console
Issue Type: <b>Bug</b>
Tested on VSCode 1.66.x. Does not seem to occur with 1.65.x
1. Run some code
2. Stop on a breakpoint.
3. Some variables have been assigned by code prior to the breakpoint (e.g. A = 10, B = 30).
4. Execute a function that returns multiple values and assign them to existing variables in the local context (e.g. A, B = some_fun()).
5. A does not get assigned the new value.
Here is some code that reproduces the problem. Set a breakpoint on the line marked `# break on this line` below:
```
import numpy as np
def some_fun():
return np.random.randn(), np.random.randn()
def some_fun_2():
return np.random.randn()
def main():
A = 10
B = 34
C = 11
A, B = some_fun() # this assignment works when stepping
print(A) # break on this line
```
Once the code has stopped at the breakpoint, execute `A, B = some_fun()` in the Debug Console to observe the bug.
These have the same erroneous behaviour:
`A, B = np.random.randn(), np.random.randn() `
`B, A = np.random.randn(), np.random.randn() `
`B, A = some_fun() `
However, `A = some_fun_2()` works fine.
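For background, the failure comes down to how CPython exposes a function frame's locals: each read of `frame.f_locals` rebuilds the dict from the frame's fast locals, so writes made through an earlier snapshot are silently thrown away, which is what the patch above works around by fetching `f_locals` once and then calling `save_locals`. A minimal sketch of that behaviour (assumes a CPython version where `f_locals` is a snapshot rather than a write-through proxy):
```python
import sys


def demo():
    a = 1
    frame = sys._getframe()
    snapshot = frame.f_locals    # dict rebuilt from the frame's fast locals
    snapshot["a"] = 99           # mutates only the snapshot, not the real local
    refreshed = frame.f_locals   # the next access re-syncs from fast locals...
    print(a, refreshed["a"])     # ...so this typically prints "1 1", not "1 99"


demo()
```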
python 3.8.10
Extension version: 2022.4.1
VS Code version: Code 1.66.2 (dfd34e8260c270da74b5c2d86d61aee4b6d56977, 2022-04-11T07:49:20.994Z)
OS version: Darwin x64 20.6.0
Restricted Mode: No
<details>
<summary>System Info</summary>
|Item|Value|
|---|---|
|CPUs|Intel(R) Core(TM) i7-8850H CPU @ 2.60GHz (12 x 2600)|
|GPU Status|2d_canvas: enabled<br>canvas_oop_rasterization: disabled_off<br>direct_rendering_display_compositor: disabled_off_ok<br>gpu_compositing: enabled<br>metal: disabled_off<br>multiple_raster_threads: enabled_on<br>oop_rasterization: enabled<br>opengl: enabled_on<br>rasterization: enabled<br>raw_draw: disabled_off_ok<br>skia_renderer: enabled_on<br>video_decode: enabled<br>video_encode: enabled<br>webgl: enabled<br>webgl2: enabled|
|Load (avg)|3, 3, 3|
|Memory (System)|32.00GB (0.06GB free)|
|Process Argv|--crash-reporter-id 2a273040-cc5d-49bc-b505-46f4293dc6a3 --crash-reporter-id 2a273040-cc5d-49bc-b505-46f4293dc6a3|
|Screen Reader|no|
|VM|0%|
</details><details>
<summary>A/B Experiments</summary>
```
vsliv368cf:30146710
vsreu685:30147344
python383cf:30185419
vspor879:30202332
vspor708:30202333
vspor363:30204092
pythonvspyl392:30443607
pythontb:30283811
pythonptprofiler:30281270
vshan820:30294714
vstes263:30335439
vscoreces:30445986
pythondataviewer:30285071
vscod805cf:30301675
pythonvspyt200:30340761
binariesv615:30325510
bridge0708:30335490
bridge0723:30353136
vsaa593cf:30376535
vsc1dst:30438360
pythonvs932:30410667
wslgetstarted:30449410
pythonvsnew555:30457759
vscscmwlcmt:30465135
vscaat:30438848
```
</details>
<!-- generated by issue reporter --> | microsoft/debugpy | diff --git a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
index 881f55a5..1adcce34 100644
--- a/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
+++ b/src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py
@@ -9,6 +9,8 @@ global_frame = sys._getframe()
def obtain_frame():
+ A = 1
+ B = 2
yield sys._getframe()
@@ -116,3 +118,15 @@ def test_evaluate_expression_4(disable_critical_log):
assert 'email' in sys._getframe().f_globals
del sys._getframe().f_globals['email']
assert 'email' not in sys._getframe().f_globals
+
+
+def test_evaluate_expression_5(disable_critical_log):
+ from _pydevd_bundle.pydevd_vars import evaluate_expression
+
+ def check(frame):
+ eval_txt = 'A, B = 5, 6'
+ evaluate_expression(None, frame, eval_txt, is_exec=True)
+ assert frame.f_locals['A'] == 5
+ assert frame.f_locals['B'] == 6
+
+ check(next(iter(obtain_frame())))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "",
"pip_packages": [
"black",
"flake8",
"tox",
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | black==25.1.0
cachetools==5.5.2
chardet==5.2.0
click==8.1.8
colorama==0.4.6
-e git+https://github.com/microsoft/debugpy.git@7be59933b1df69d22e838f99e54fcdb5afc8cff8#egg=debugpy
distlib==0.3.9
exceptiongroup==1.2.2
filelock==3.18.0
flake8==7.2.0
iniconfig==2.1.0
mccabe==0.7.0
mypy-extensions==1.0.0
packaging==24.2
pathspec==0.12.1
platformdirs==4.3.7
pluggy==1.5.0
pycodestyle==2.13.0
pyflakes==3.3.1
pyproject-api==1.9.0
pytest==8.3.5
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
| name: debugpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- black==25.1.0
- cachetools==5.5.2
- chardet==5.2.0
- click==8.1.8
- colorama==0.4.6
- debugpy==1.6.0+18.g7be59933
- distlib==0.3.9
- exceptiongroup==1.2.2
- filelock==3.18.0
- flake8==7.2.0
- iniconfig==2.1.0
- mccabe==0.7.0
- mypy-extensions==1.0.0
- packaging==24.2
- pathspec==0.12.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pyproject-api==1.9.0
- pytest==8.3.5
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/debugpy
| [
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_5"
] | [] | [
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_basic",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_1",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_2",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_3",
"src/debugpy/_vendored/pydevd/tests_python/test_evaluate_expression.py::test_evaluate_expression_4"
] | [] | MIT License | 12,677 | 316 | [
"src/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py"
] |
python-pillow__Pillow-6234 | de1ba373e10065e7bffe4bdb18a4aec40ef306a2 | 2022-04-21 21:41:19 | 14169c5e2d738f0eda2e997d64c94f1cc8ee319f | diff --git a/src/PIL/ImageFont.py b/src/PIL/ImageFont.py
index 81ac03fe6..4799d71fb 100644
--- a/src/PIL/ImageFont.py
+++ b/src/PIL/ImageFont.py
@@ -711,8 +711,13 @@ class FreeTypeFont:
:return: A FreeTypeFont object.
"""
+ if font is None:
+ try:
+ font = BytesIO(self.font_bytes)
+ except AttributeError:
+ font = self.path
return FreeTypeFont(
- font=self.path if font is None else font,
+ font=font,
size=self.size if size is None else size,
index=self.index if index is None else index,
encoding=self.encoding if encoding is None else encoding,
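Taken together with the report below, a minimal sketch of what this patch enables: building a variant from a font that was loaded from an in-memory buffer (the font file name is a placeholder):
```python
from io import BytesIO

from PIL import ImageFont

# Any TrueType file works here; the path is a placeholder.
with open("FreeMono.ttf", "rb") as f:
    font = ImageFont.truetype(BytesIO(f.read()), 20)

# With the fix, the variant is rebuilt from the stored font bytes, so this
# no longer raises "OSError: cannot open resource".
bigger = font.font_variant(size=font.size + 10)
print(bigger.size)  # 30
```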
| Using font_variant on a font whose path is defined by BytesIO fails
### What did you do?
Opened a font using
```py
font = ImageFont.truetype(
BytesIO(pkgutil.get_data(__package__, path)),
size
)
```
then tried to edit the font using font_variant.
```py
# doesn't work, it raises "OSError: cannot open resource"
font = font.font_variant(size=font.size+10)
```
However, with a bit more code I managed to get it to work:
```py
# does work
font = font.font_variant(
font=BytesIO(font.path.getvalue()),
size=font.size+10
)
```
Additionally:
```py
# does work
font.path.seek(0)
font = font.font_variant(
size=font.size+10
)
```
I assume this has something to do with font_variant passing `font.path` to the init of the new font object, and since the `BytesIO` has already been read, it can't be read again. To fix this, it could check whether it is a file-like object and seek back to the beginning?
### What did you expect to happen?
I expected the font to be successfully changed
### What actually happened?
It raised the error
```py
line 101, in get_font_size
font = font.font_variant(size=font.size+10)
line 715, in font_variant
return FreeTypeFont(
line 230, in __init__
load_from_bytes(font)
line 211, in load_from_bytes
self.font = core.getfont(
OSError: cannot open resource
```
### What are your OS, Python and Pillow versions?
* OS: Windows 11
* Python: 3.10
* Pillow: 9.1.0 | python-pillow/Pillow | diff --git a/Tests/test_imagefont.py b/Tests/test_imagefont.py
index 0e1d1e637..0c50303f9 100644
--- a/Tests/test_imagefont.py
+++ b/Tests/test_imagefont.py
@@ -65,9 +65,12 @@ class TestImageFont:
return font_bytes
def test_font_with_filelike(self):
- ImageFont.truetype(
+ ttf = ImageFont.truetype(
self._font_as_bytes(), FONT_SIZE, layout_engine=self.LAYOUT_ENGINE
)
+ ttf_copy = ttf.font_variant()
+ assert ttf_copy.font_bytes == ttf.font_bytes
+
self._render(self._font_as_bytes())
# Usage note: making two fonts from the same buffer fails.
# shared_bytes = self._font_as_bytes()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 9.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libjpeg-dev zlib1g-dev libtiff5-dev libfreetype6-dev liblcms2-dev libwebp-dev libopenjp2-7-dev libimagequant-dev libraqm-dev libxcb1-dev"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
-e git+https://github.com/python-pillow/Pillow.git@de1ba373e10065e7bffe4bdb18a4aec40ef306a2#egg=Pillow
pluggy==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
tomli==2.2.1
| name: Pillow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- tomli==2.2.1
prefix: /opt/conda/envs/Pillow
| [
"Tests/test_imagefont.py::TestImageFont::test_font_with_filelike",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_font_with_filelike"
] | [] | [
"Tests/test_imagefont.py::TestImageFont::test_sanity",
"Tests/test_imagefont.py::TestImageFont::test_font_properties",
"Tests/test_imagefont.py::TestImageFont::test_font_with_name",
"Tests/test_imagefont.py::TestImageFont::test_font_with_open_file",
"Tests/test_imagefont.py::TestImageFont::test_non_ascii_path",
"Tests/test_imagefont.py::TestImageFont::test_render_equal",
"Tests/test_imagefont.py::TestImageFont::test_transparent_background",
"Tests/test_imagefont.py::TestImageFont::test_I16",
"Tests/test_imagefont.py::TestImageFont::test_textsize_equal",
"Tests/test_imagefont.py::TestImageFont::test_getlength[text-L-FreeMono.ttf-15-36-36]",
"Tests/test_imagefont.py::TestImageFont::test_getlength[text-1-FreeMono.ttf-15-36-36]",
"Tests/test_imagefont.py::TestImageFont::test_getlength[rrr-L-DejaVuSans/DejaVuSans.ttf-18-21-22.21875]",
"Tests/test_imagefont.py::TestImageFont::test_getlength[rrr-1-DejaVuSans/DejaVuSans.ttf-18-24-22.21875]",
"Tests/test_imagefont.py::TestImageFont::test_getlength[ill-L-OpenSansCondensed-LightItalic.ttf-63-33-31.984375]",
"Tests/test_imagefont.py::TestImageFont::test_getlength[ill-1-OpenSansCondensed-LightItalic.ttf-63-33-31.984375]",
"Tests/test_imagefont.py::TestImageFont::test_render_multiline",
"Tests/test_imagefont.py::TestImageFont::test_render_multiline_text",
"Tests/test_imagefont.py::TestImageFont::test_unknown_align",
"Tests/test_imagefont.py::TestImageFont::test_draw_align",
"Tests/test_imagefont.py::TestImageFont::test_multiline_size",
"Tests/test_imagefont.py::TestImageFont::test_multiline_width",
"Tests/test_imagefont.py::TestImageFont::test_multiline_spacing",
"Tests/test_imagefont.py::TestImageFont::test_rotated_transposed_font",
"Tests/test_imagefont.py::TestImageFont::test_unrotated_transposed_font",
"Tests/test_imagefont.py::TestImageFont::test_rotated_transposed_font_get_mask",
"Tests/test_imagefont.py::TestImageFont::test_unrotated_transposed_font_get_mask",
"Tests/test_imagefont.py::TestImageFont::test_free_type_font_get_name",
"Tests/test_imagefont.py::TestImageFont::test_free_type_font_get_metrics",
"Tests/test_imagefont.py::TestImageFont::test_free_type_font_get_offset",
"Tests/test_imagefont.py::TestImageFont::test_free_type_font_get_mask",
"Tests/test_imagefont.py::TestImageFont::test_load_path_not_found",
"Tests/test_imagefont.py::TestImageFont::test_load_non_font_bytes",
"Tests/test_imagefont.py::TestImageFont::test_default_font",
"Tests/test_imagefont.py::TestImageFont::test_getsize_empty",
"Tests/test_imagefont.py::TestImageFont::test_render_empty",
"Tests/test_imagefont.py::TestImageFont::test_unicode_pilfont",
"Tests/test_imagefont.py::TestImageFont::test_unicode_extended",
"Tests/test_imagefont.py::TestImageFont::test_find_linux_font",
"Tests/test_imagefont.py::TestImageFont::test_find_macos_font",
"Tests/test_imagefont.py::TestImageFont::test_imagefont_getters",
"Tests/test_imagefont.py::TestImageFont::test_getsize_stroke",
"Tests/test_imagefont.py::TestImageFont::test_complex_font_settings",
"Tests/test_imagefont.py::TestImageFont::test_variation_get",
"Tests/test_imagefont.py::TestImageFont::test_variation_set_by_name",
"Tests/test_imagefont.py::TestImageFont::test_variation_set_by_axes",
"Tests/test_imagefont.py::TestImageFont::test_textbbox_non_freetypefont",
"Tests/test_imagefont.py::TestImageFont::test_anchor[ls]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[ms]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[rs]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[ma]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[mt]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[mm]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[mb]",
"Tests/test_imagefont.py::TestImageFont::test_anchor[md]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[lm-left]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[lm-center]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[lm-right]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[mm-left]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[mm-center]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[mm-right]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[rm-left]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[rm-center]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[rm-right]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[ma-center]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_multiline[md-center]",
"Tests/test_imagefont.py::TestImageFont::test_anchor_invalid",
"Tests/test_imagefont.py::TestImageFont::test_bitmap_font[1]",
"Tests/test_imagefont.py::TestImageFont::test_bitmap_font[2]",
"Tests/test_imagefont.py::TestImageFont::test_bitmap_font[4]",
"Tests/test_imagefont.py::TestImageFont::test_bitmap_font[8]",
"Tests/test_imagefont.py::TestImageFont::test_bitmap_font_stroke",
"Tests/test_imagefont.py::TestImageFont::test_standard_embedded_color",
"Tests/test_imagefont.py::TestImageFont::test_cbdt",
"Tests/test_imagefont.py::TestImageFont::test_cbdt_mask",
"Tests/test_imagefont.py::TestImageFont::test_sbix",
"Tests/test_imagefont.py::TestImageFont::test_sbix_mask",
"Tests/test_imagefont.py::TestImageFont::test_colr",
"Tests/test_imagefont.py::TestImageFont::test_colr_mask",
"Tests/test_imagefont.py::TestImageFont::test_fill_deprecation",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_sanity",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_font_properties",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_font_with_name",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_font_with_open_file",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_non_ascii_path",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_render_equal",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_transparent_background",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_I16",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_textsize_equal",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[text-L-FreeMono.ttf-15-36-36]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[text-1-FreeMono.ttf-15-36-36]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[rrr-L-DejaVuSans/DejaVuSans.ttf-18-21-22.21875]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[rrr-1-DejaVuSans/DejaVuSans.ttf-18-24-22.21875]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[ill-L-OpenSansCondensed-LightItalic.ttf-63-33-31.984375]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getlength[ill-1-OpenSansCondensed-LightItalic.ttf-63-33-31.984375]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_render_multiline",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_render_multiline_text",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_unknown_align",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_draw_align",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_multiline_size",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_multiline_width",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_multiline_spacing",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_rotated_transposed_font",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_unrotated_transposed_font",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_rotated_transposed_font_get_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_unrotated_transposed_font_get_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_free_type_font_get_name",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_free_type_font_get_metrics",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_free_type_font_get_offset",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_free_type_font_get_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_load_path_not_found",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_load_non_font_bytes",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_default_font",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getsize_empty",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_render_empty",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_unicode_pilfont",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_unicode_extended",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_find_linux_font",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_find_macos_font",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_imagefont_getters",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_getsize_stroke",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_complex_font_settings",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_variation_get",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_variation_set_by_name",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_variation_set_by_axes",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_textbbox_non_freetypefont",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[ls]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[ms]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[rs]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[ma]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[mt]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[mm]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[mb]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor[md]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[lm-left]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[lm-center]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[lm-right]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[mm-left]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[mm-center]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[mm-right]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[rm-left]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[rm-center]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[rm-right]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[ma-center]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_multiline[md-center]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_anchor_invalid",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_bitmap_font[1]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_bitmap_font[2]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_bitmap_font[4]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_bitmap_font[8]",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_bitmap_font_stroke",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_standard_embedded_color",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_cbdt",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_cbdt_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_sbix",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_sbix_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_colr",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_colr_mask",
"Tests/test_imagefont.py::TestImageFont_RaqmLayout::test_fill_deprecation",
"Tests/test_imagefont.py::test_render_mono_size",
"Tests/test_imagefont.py::test_oom[Tests/fonts/oom-e8e927ba6c0d38274a37c1567560eb33baf74627.ttf]",
"Tests/test_imagefont.py::test_raqm_missing_warning",
"Tests/test_imagefont.py::test_constants_deprecation"
] | [] | MIT-CMU License | 12,678 | 185 | [
"src/PIL/ImageFont.py"
] |