instance_id
stringlengths 12
57
| base_commit
stringlengths 40
40
| created_at
stringdate 2015-01-06 14:05:07
2025-04-29 17:56:51
| environment_setup_commit
stringlengths 40
40
| hints_text
stringlengths 0
158k
| patch
stringlengths 261
20.8k
| problem_statement
stringlengths 11
52.5k
| repo
stringlengths 7
53
| test_patch
stringlengths 280
206k
| meta
dict | version
stringclasses 463
values | install_config
dict | requirements
stringlengths 93
34k
⌀ | environment
stringlengths 772
20k
⌀ | FAIL_TO_PASS
sequencelengths 1
856
| FAIL_TO_FAIL
sequencelengths 0
536
| PASS_TO_PASS
sequencelengths 0
7.87k
| PASS_TO_FAIL
sequencelengths 0
92
| license_name
stringclasses 35
values | __index_level_0__
int64 11
21.4k
| num_tokens_patch
int64 103
4.99k
| before_filepaths
sequencelengths 0
14
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jmwri__ioccontainer-4 | 9155dbf9030df7bd911a5bc93feb397d1f545feb | 2018-02-23 20:50:01 | 9155dbf9030df7bd911a5bc93feb397d1f545feb | diff --git a/ioccontainer/inject.py b/ioccontainer/inject.py
index 162f6e1..98ba622 100644
--- a/ioccontainer/inject.py
+++ b/ioccontainer/inject.py
@@ -50,7 +50,11 @@ def inject_decorator(container: 'Container'):
service = container.get(cls)
if _is_positional_argument(position, parameter, new_args):
- new_args.append(service)
+ if len(new_args) >= position + 1:
+ new_args[position] = service
+ else:
+ new_args.append(service)
+
elif _is_keyword_argument(parameter):
kwargs[parameter.name] = service
else:
@@ -96,7 +100,9 @@ def _default_parameter_provided(parameter: inspect.Parameter) -> bool:
def _argument_provided(position: int, parameter: inspect.Parameter,
args: typing.List, kwargs: typing.Dict) -> bool:
- return position < len(args) or parameter.name in kwargs.keys()
+ if position < len(args) and args[position] is not None:
+ return True
+ return kwargs.get(parameter.name) is not None
def _is_positional_argument(
@@ -106,7 +112,9 @@ def _is_positional_argument(
inspect.Parameter.POSITIONAL_OR_KEYWORD)
if parameter.kind not in positional_types:
return False
- return position == len(args)
+ if position == len(args):
+ return True
+ return position + 1 == len(args) and args[position] is None
def _is_keyword_argument(parameter: inspect.Parameter) -> bool:
| Inject should override value if None
When using the `@inject` decorator, the provided parameter should be overridden if it is `None`. At the moment the provider parameter takes precedence 100% of the time. | jmwri/ioccontainer | diff --git a/tests/test_container.py b/tests/test_container.py
index d76d2e1..f338f75 100644
--- a/tests/test_container.py
+++ b/tests/test_container.py
@@ -148,3 +148,29 @@ def test_injection_to_constructor():
my_class = MyClass('my_test_string')
assert my_class.some_str is 'my_test_string'
assert my_class.get_val() is 1
+
+
+@provider('str_service')
+def provide_str():
+ return 'string service'
+
+
+def test_param_overriding():
+ @inject(string='str_service')
+ def my_fn(string):
+ return string
+
+ assert my_fn() == 'string service'
+ assert my_fn('overridden') == 'overridden'
+ assert my_fn(None) == 'string service'
+
+
+def test_multiple_param_overriding():
+ @inject(s1='str_service', s2='str_service')
+ def my_fn(s1, s2):
+ return s1, s2
+
+ assert my_fn() == ('string service', 'string service')
+ assert my_fn('overridden') == ('overridden', 'string service')
+ assert my_fn(None) == ('string service', 'string service')
+ assert my_fn('overridden', None) == ('overridden', 'string service')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | cachetools==5.5.2
chardet==5.2.0
colorama==0.4.6
coverage==7.8.0
distlib==0.3.9
docopt==0.6.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/jmwri/ioccontainer.git@9155dbf9030df7bd911a5bc93feb397d1f545feb#egg=ioccontainer
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pyproject-api==1.9.0
pytest @ file:///croot/pytest_1738938843180/work
pytest-watch==4.2.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
virtualenv==20.29.3
watchdog==6.0.0
| name: ioccontainer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- cachetools==5.5.2
- chardet==5.2.0
- colorama==0.4.6
- coverage==7.8.0
- distlib==0.3.9
- docopt==0.6.2
- filelock==3.18.0
- platformdirs==4.3.7
- pyproject-api==1.9.0
- pytest-watch==4.2.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- virtualenv==20.29.3
- watchdog==6.0.0
prefix: /opt/conda/envs/ioccontainer
| [
"tests/test_container.py::test_param_overriding",
"tests/test_container.py::test_multiple_param_overriding"
] | [] | [
"tests/test_container.py::test_injection",
"tests/test_container.py::test_singleton",
"tests/test_container.py::test_invalid_scope",
"tests/test_container.py::test_duplicate_provider",
"tests/test_container.py::test_invalid_provider",
"tests/test_container.py::test_invalid_service_name",
"tests/test_container.py::test_no_provider_specified",
"tests/test_container.py::test_multiple_params",
"tests/test_container.py::test_default_provided",
"tests/test_container.py::test_argument_provided",
"tests/test_container.py::test_threaded_provider",
"tests/test_container.py::test_injection_to_constructor"
] | [] | MIT License | 2,200 | 371 | [
"ioccontainer/inject.py"
] |
|
marshmallow-code__marshmallow-744 | 642d18b58d9b42d16b450a30b42aa57ea3859192 | 2018-02-24 22:05:11 | 8e217c8d6fefb7049ab3389f31a8d35824fa2d96 | diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 2327081a..79bb8ee1 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -836,11 +836,11 @@ class BaseSchema(base.SchemaABC):
if pass_many:
validator = functools.partial(validator, many=many)
if many and not pass_many:
- for idx, item in enumerate(data):
+ for idx, (item, orig) in enumerate(zip(data, original_data)):
try:
- self._unmarshal.run_validator(validator,
- item, original_data, self.fields, many=many,
- index=idx, pass_original=pass_original)
+ self._unmarshal.run_validator(
+ validator, item, orig, self.fields, many=many,
+ index=idx, pass_original=pass_original)
except ValidationError as err:
errors.update(err.messages)
else:
| post_dump is passing a list of objects as original object
Hi,
I think post_dump with pass_original=True should pass the original object related to the data serialized and not a list of objects which this object belongs to.
``` python
from marshmallow import fields, post_dump, Schema
class DeviceSchema(Schema):
id = fields.String()
@post_dump(pass_original=True)
def __post_dump(self, data, obj):
print(obj) # <-- this is a list
devices = [dict(id=1), dict(id=2)]
DeviceSchema().dump(devices, many=True)
```
In the above example, the parameter `obj` is a list of devices rather than the device object itself.
What do you think?
| marshmallow-code/marshmallow | diff --git a/tests/test_decorators.py b/tests/test_decorators.py
index 3fb3f1bb..226e550f 100644
--- a/tests/test_decorators.py
+++ b/tests/test_decorators.py
@@ -365,6 +365,35 @@ class TestValidatesSchemaDecorator:
assert '_schema' in errors['nested']
assert 'foo' not in errors['nested']
+ @pytest.mark.parametrize("data", ([{"foo": 1, "bar": 2}],))
+ @pytest.mark.parametrize(
+ "pass_many,expected_data,expected_original_data",
+ (
+ [True, [{"foo": 1}], [{"foo": 1, "bar": 2}]],
+ [False, {"foo": 1}, {"foo": 1, "bar": 2}],
+ ),
+ )
+ def test_validator_nested_many_pass_original_and_pass_many(
+ self, pass_many, data, expected_data, expected_original_data):
+
+ class NestedSchema(Schema):
+ foo = fields.Int(required=True)
+
+ @validates_schema(pass_many=pass_many, pass_original=True)
+ def validate_schema(self, data, original_data, many=False):
+ assert data == expected_data
+ assert original_data == expected_original_data
+ assert many is pass_many
+ raise ValidationError("Method called")
+
+ class MySchema(Schema):
+ nested = fields.Nested(NestedSchema, required=True, many=True)
+
+ schema = MySchema()
+ errors = schema.validate({"nested": data})
+ error = errors["nested"] if pass_many else errors["nested"][0]
+ assert error["_schema"][0] == "Method called"
+
def test_decorated_validators(self):
class MySchema(Schema):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[reco]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": [],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==3.5.0
iniconfig==2.1.0
invoke==0.22.0
-e git+https://github.com/marshmallow-code/marshmallow.git@642d18b58d9b42d16b450a30b42aa57ea3859192#egg=marshmallow
mccabe==0.6.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycodestyle==2.3.1
pyflakes==1.6.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.6.1
pytz==2017.3
simplejson==3.13.2
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.12.1
typing_extensions==4.13.0
virtualenv==20.29.3
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==3.5.0
- iniconfig==2.1.0
- invoke==0.22.0
- mccabe==0.6.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycodestyle==2.3.1
- pyflakes==1.6.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.6.1
- pytz==2017.3
- simplejson==3.13.2
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.12.1
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many_pass_original_and_pass_many[False-expected_data1-expected_original_data1-data0]"
] | [] | [
"tests/test_decorators.py::test_decorated_processors",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_single_no_mutation",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_single_with_mutation",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_many",
"tests/test_decorators.py::test_decorated_processor_inheritance",
"tests/test_decorators.py::test_pre_dump_is_invoked_before_implicit_field_generation",
"tests/test_decorators.py::TestValidatesDecorator::test_validates",
"tests/test_decorators.py::TestValidatesDecorator::test_validates_with_attribute",
"tests/test_decorators.py::TestValidatesDecorator::test_validates_decorator",
"tests/test_decorators.py::TestValidatesDecorator::test_field_not_present",
"tests/test_decorators.py::TestValidatesDecorator::test_precedence",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many_pass_original_and_pass_many[True-expected_data0-expected_original_data0-data0]",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_decorated_validators",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_multiple_validators",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_passing_original_data",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_allow_arbitrary_field_names_in_error",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_skip_on_field_errors",
"tests/test_decorators.py::test_decorator_error_handling",
"tests/test_decorators.py::test_decorator_error_handling_with_load[pre_load]",
"tests/test_decorators.py::test_decorator_error_handling_with_load[post_load]",
"tests/test_decorators.py::test_decorator_error_handling_with_dump[pre_dump]",
"tests/test_decorators.py::test_decorator_error_handling_with_dump[post_dump]"
] | [] | MIT License | 2,205 | 228 | [
"marshmallow/schema.py"
] |
|
pre-commit__pre-commit-711 | 29715c9268dc866facf0b8a9cbe21d218d948a7b | 2018-02-24 22:53:14 | ac3a37d1a0e3575bddf23fd9babf6e56202b2988 | diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py
index ca83a58..666cd11 100644
--- a/pre_commit/commands/autoupdate.py
+++ b/pre_commit/commands/autoupdate.py
@@ -33,9 +33,9 @@ def _update_repo(repo_config, runner, tags_only):
Args:
repo_config - A config for a repository
"""
- repo = Repository.create(repo_config, runner.store)
+ repo_path = runner.store.clone(repo_config['repo'], repo_config['sha'])
- with cwd(repo._repo_path):
+ with cwd(repo_path):
cmd_output('git', 'fetch')
tag_cmd = ('git', 'describe', 'origin/master', '--tags')
if tags_only:
@@ -57,7 +57,7 @@ def _update_repo(repo_config, runner, tags_only):
new_repo = Repository.create(new_config, runner.store)
# See if any of our hooks were deleted with the new commits
- hooks = {hook['id'] for hook in repo.repo_config['hooks']}
+ hooks = {hook['id'] for hook in repo_config['hooks']}
hooks_missing = hooks - (hooks & set(new_repo.manifest_hooks))
if hooks_missing:
raise RepositoryCannotBeUpdatedError(
diff --git a/pre_commit/repository.py b/pre_commit/repository.py
index 3ed160a..624ccd0 100644
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -7,7 +7,6 @@ import os
import pipes
import shutil
import sys
-from collections import defaultdict
import pkg_resources
from cached_property import cached_property
@@ -149,22 +148,11 @@ class Repository(object):
else:
return cls(config, store)
- @cached_property
- def _repo_path(self):
- return self.store.clone(
- self.repo_config['repo'], self.repo_config['sha'],
- )
-
- @cached_property
- def _prefix(self):
- return Prefix(self._repo_path)
-
- def _prefix_from_deps(self, language_name, deps):
- return self._prefix
-
@cached_property
def manifest_hooks(self):
- manifest_path = os.path.join(self._repo_path, C.MANIFEST_FILE)
+ repo, sha = self.repo_config['repo'], self.repo_config['sha']
+ repo_path = self.store.clone(repo, sha)
+ manifest_path = os.path.join(repo_path, C.MANIFEST_FILE)
return {hook['id']: hook for hook in load_manifest(manifest_path)}
@cached_property
@@ -185,21 +173,25 @@ class Repository(object):
for hook in self.repo_config['hooks']
)
- @cached_property
+ def _prefix_from_deps(self, language_name, deps):
+ repo, sha = self.repo_config['repo'], self.repo_config['sha']
+ return Prefix(self.store.clone(repo, sha, deps))
+
def _venvs(self):
- deps_dict = defaultdict(_UniqueList)
- for _, hook in self.hooks:
- deps_dict[(hook['language'], hook['language_version'])].update(
- hook['additional_dependencies'],
- )
ret = []
- for (language, version), deps in deps_dict.items():
- ret.append((self._prefix, language, version, deps))
+ for _, hook in self.hooks:
+ language = hook['language']
+ version = hook['language_version']
+ deps = hook['additional_dependencies']
+ ret.append((
+ self._prefix_from_deps(language, deps),
+ language, version, deps,
+ ))
return tuple(ret)
def require_installed(self):
if not self.__installed:
- _install_all(self._venvs, self.repo_config['repo'], self.store)
+ _install_all(self._venvs(), self.repo_config['repo'], self.store)
self.__installed = True
def run_hook(self, hook, file_args):
@@ -237,19 +229,6 @@ class LocalRepository(Repository):
for hook in self.repo_config['hooks']
)
- @cached_property
- def _venvs(self):
- ret = []
- for _, hook in self.hooks:
- language = hook['language']
- version = hook['language_version']
- deps = hook['additional_dependencies']
- ret.append((
- self._prefix_from_deps(language, deps),
- language, version, deps,
- ))
- return tuple(ret)
-
class MetaRepository(LocalRepository):
@cached_property
@@ -303,14 +282,3 @@ class MetaRepository(LocalRepository):
(hook['id'], _hook(self.manifest_hooks[hook['id']], hook))
for hook in self.repo_config['hooks']
)
-
-
-class _UniqueList(list):
- def __init__(self):
- self._set = set()
-
- def update(self, obj):
- for item in obj:
- if item not in self._set:
- self._set.add(item)
- self.append(item)
diff --git a/pre_commit/store.py b/pre_commit/store.py
index 1311984..7e49c8f 100644
--- a/pre_commit/store.py
+++ b/pre_commit/store.py
@@ -72,9 +72,9 @@ class Store(object):
with contextlib.closing(sqlite3.connect(tmpfile)) as db:
db.executescript(
'CREATE TABLE repos ('
- ' repo CHAR(255) NOT NULL,'
- ' ref CHAR(255) NOT NULL,'
- ' path CHAR(255) NOT NULL,'
+ ' repo TEXT NOT NULL,'
+ ' ref TEXT NOT NULL,'
+ ' path TEXT NOT NULL,'
' PRIMARY KEY (repo, ref)'
');',
)
@@ -101,15 +101,17 @@ class Store(object):
self._create()
self.__created = True
- def _new_repo(self, repo, ref, make_strategy):
+ def _new_repo(self, repo, ref, deps, make_strategy):
self.require_created()
+ if deps:
+ repo = '{}:{}'.format(repo, ','.join(sorted(deps)))
def _get_result():
# Check if we already exist
with sqlite3.connect(self.db_path) as db:
result = db.execute(
'SELECT path FROM repos WHERE repo = ? AND ref = ?',
- [repo, ref],
+ (repo, ref),
).fetchone()
if result:
return result[0]
@@ -137,7 +139,7 @@ class Store(object):
)
return directory
- def clone(self, repo, ref):
+ def clone(self, repo, ref, deps=()):
"""Clone the given url and checkout the specific ref."""
def clone_strategy(directory):
cmd_output(
@@ -151,7 +153,7 @@ class Store(object):
env=no_git_env(),
)
- return self._new_repo(repo, ref, clone_strategy)
+ return self._new_repo(repo, ref, deps, clone_strategy)
def make_local(self, deps):
def make_local_strategy(directory):
@@ -172,8 +174,7 @@ class Store(object):
_git_cmd('commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
return self._new_repo(
- 'local:{}'.format(','.join(sorted(deps))), C.LOCAL_REPO_VERSION,
- make_local_strategy,
+ 'local', C.LOCAL_REPO_VERSION, deps, make_local_strategy,
)
@cached_property
| Same repo but different additional dependencies
I have two Git projects `A` and `B`, and those two use same [pre-commit repo](https://github.com/coldnight/pre-commit-pylint). But those two projects use different additional dependencies:
`.pre-commit-config.yaml` in `A`:
```yaml
- repo: [email protected]:coldnight/pre-commit-pylint.git
sha: 630e2662aabf3236fc62460b163d613c4bd1cfbc
hooks:
- id: pylint-py3k
- id: pylint-score-limit
args:
- --limit=8.5
- --rcfile=./.pylintrc
additional_dependencies:
- enum34; python_version<='3.4'
- mock
```
`.pre-commit-config.yaml` in `B`:
```yaml
- repo: [email protected]:coldnight/pre-commit-pylint.git
sha: 630e2662aabf3236fc62460b163d613c4bd1cfbc
hooks:
- id: pylint-py3k
- id: pylint-score-limit
args:
- --limit=8.5
- --rcfile=./.pylintrc
additional_dependencies:
- enum34; python_version<='3.4'
- requests
```
Here is my problem:
1. First I run `pre-commit` in project `A`, and the environment has installed
2. And then I run `pre-commit` in project `B`, and the environment has installed
3. When back project `A` and run `pre-commit`, the installed environment has removed by above and need another installation(Huge slow!)
Any idea for this? Support different projects that use different home directories? | pre-commit/pre-commit | diff --git a/tests/conftest.py b/tests/conftest.py
index fd3784d..246820e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -165,12 +165,6 @@ def log_info_mock():
yield mck
[email protected]
-def log_warning_mock():
- with mock.patch.object(logging.getLogger('pre_commit'), 'warning') as mck:
- yield mck
-
-
class FakeStream(object):
def __init__(self):
self.data = io.BytesIO()
diff --git a/tests/repository_test.py b/tests/repository_test.py
index 0123ce4..dea387f 100644
--- a/tests/repository_test.py
+++ b/tests/repository_test.py
@@ -433,7 +433,7 @@ def test_venvs(tempdir_factory, store):
path = make_repo(tempdir_factory, 'python_hooks_repo')
config = make_config_from_repo(path)
repo = Repository.create(config, store)
- venv, = repo._venvs
+ venv, = repo._venvs()
assert venv == (mock.ANY, 'python', python.get_default_version(), [])
@@ -443,50 +443,33 @@ def test_additional_dependencies(tempdir_factory, store):
config = make_config_from_repo(path)
config['hooks'][0]['additional_dependencies'] = ['pep8']
repo = Repository.create(config, store)
- venv, = repo._venvs
+ venv, = repo._venvs()
assert venv == (mock.ANY, 'python', python.get_default_version(), ['pep8'])
@pytest.mark.integration
-def test_additional_dependencies_duplicated(
- tempdir_factory, store, log_warning_mock,
-):
- path = make_repo(tempdir_factory, 'ruby_hooks_repo')
- config = make_config_from_repo(path)
- deps = ['thread_safe', 'tins', 'thread_safe']
- config['hooks'][0]['additional_dependencies'] = deps
- repo = Repository.create(config, store)
- venv, = repo._venvs
- assert venv == (mock.ANY, 'ruby', 'default', ['thread_safe', 'tins'])
-
-
[email protected]
-def test_additional_python_dependencies_installed(tempdir_factory, store):
+def test_additional_dependencies_roll_forward(tempdir_factory, store):
path = make_repo(tempdir_factory, 'python_hooks_repo')
- config = make_config_from_repo(path)
- config['hooks'][0]['additional_dependencies'] = ['mccabe']
- repo = Repository.create(config, store)
- repo.require_installed()
- with python.in_env(repo._prefix, 'default'):
- output = cmd_output('pip', 'freeze', '-l')[1]
- assert 'mccabe' in output
+ config1 = make_config_from_repo(path)
+ repo1 = Repository.create(config1, store)
+ repo1.require_installed()
+ (prefix1, _, version1, _), = repo1._venvs()
+ with python.in_env(prefix1, version1):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
[email protected]
-def test_additional_dependencies_roll_forward(tempdir_factory, store):
- path = make_repo(tempdir_factory, 'python_hooks_repo')
- config = make_config_from_repo(path)
- # Run the repo once without additional_dependencies
- repo = Repository.create(config, store)
- repo.require_installed()
- # Now run it with additional_dependencies
- config['hooks'][0]['additional_dependencies'] = ['mccabe']
- repo = Repository.create(config, store)
- repo.require_installed()
- # We should see our additional dependency installed
- with python.in_env(repo._prefix, 'default'):
- output = cmd_output('pip', 'freeze', '-l')[1]
- assert 'mccabe' in output
+ # Make another repo with additional dependencies
+ config2 = make_config_from_repo(path)
+ config2['hooks'][0]['additional_dependencies'] = ['mccabe']
+ repo2 = Repository.create(config2, store)
+ repo2.require_installed()
+ (prefix2, _, version2, _), = repo2._venvs()
+ with python.in_env(prefix2, version2):
+ assert 'mccabe' in cmd_output('pip', 'freeze', '-l')[1]
+
+ # should not have affected original
+ with python.in_env(prefix1, version1):
+ assert 'mccabe' not in cmd_output('pip', 'freeze', '-l')[1]
@xfailif_windows_no_ruby
@@ -499,7 +482,8 @@ def test_additional_ruby_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = ['thread_safe', 'tins']
repo = Repository.create(config, store)
repo.require_installed()
- with ruby.in_env(repo._prefix, 'default'):
+ (prefix, _, version, _), = repo._venvs()
+ with ruby.in_env(prefix, version):
output = cmd_output('gem', 'list', '--local')[1]
assert 'thread_safe' in output
assert 'tins' in output
@@ -516,7 +500,8 @@ def test_additional_node_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = ['lodash']
repo = Repository.create(config, store)
repo.require_installed()
- with node.in_env(repo._prefix, 'default'):
+ (prefix, _, version, _), = repo._venvs()
+ with node.in_env(prefix, version):
output = cmd_output('npm', 'ls', '-g')[1]
assert 'lodash' in output
@@ -532,7 +517,8 @@ def test_additional_golang_dependencies_installed(
config['hooks'][0]['additional_dependencies'] = deps
repo = Repository.create(config, store)
repo.require_installed()
- binaries = os.listdir(repo._prefix.path(
+ (prefix, _, _, _), = repo._venvs()
+ binaries = os.listdir(prefix.path(
helpers.environment_dir(golang.ENVIRONMENT_DIR, 'default'), 'bin',
))
# normalize for windows
@@ -598,8 +584,9 @@ def test_control_c_control_c_on_install(tempdir_factory, store):
repo.run_hook(hook, [])
# Should have made an environment, however this environment is broken!
- envdir = 'py_env-{}'.format(python.get_default_version())
- assert repo._prefix.exists(envdir)
+ (prefix, _, version, _), = repo._venvs()
+ envdir = 'py_env-{}'.format(version)
+ assert prefix.exists(envdir)
# However, it should be perfectly runnable (reinstall after botched
# install)
@@ -616,8 +603,8 @@ def test_invalidated_virtualenv(tempdir_factory, store):
# Simulate breaking of the virtualenv
repo.require_installed()
- version = python.get_default_version()
- libdir = repo._prefix.path('py_env-{}'.format(version), 'lib', version)
+ (prefix, _, version, _), = repo._venvs()
+ libdir = prefix.path('py_env-{}'.format(version), 'lib', version)
paths = [
os.path.join(libdir, p) for p in ('site.py', 'site.pyc', '__pycache__')
]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@29715c9268dc866facf0b8a9cbe21d218d948a7b#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-env==0.6.2
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- attrs==22.2.0
- cached-property==1.5.2
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-env==0.6.2
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/repository_test.py::test_venvs",
"tests/repository_test.py::test_additional_dependencies",
"tests/repository_test.py::test_additional_dependencies_roll_forward",
"tests/repository_test.py::test_additional_node_dependencies_installed",
"tests/repository_test.py::test_control_c_control_c_on_install",
"tests/repository_test.py::test_invalidated_virtualenv"
] | [
"tests/repository_test.py::test_switch_language_versions_doesnt_clobber",
"tests/repository_test.py::test_run_a_ruby_hook",
"tests/repository_test.py::test_run_versioned_ruby_hook",
"tests/repository_test.py::test_run_ruby_hook_with_disable_shared_gems",
"tests/repository_test.py::test_golang_hook",
"tests/repository_test.py::test_additional_ruby_dependencies_installed",
"tests/repository_test.py::test_additional_golang_dependencies_installed",
"tests/repository_test.py::test_local_golang_additional_dependencies"
] | [
"tests/repository_test.py::test_python_hook",
"tests/repository_test.py::test_python_hook_default_version",
"tests/repository_test.py::test_python_hook_args_with_spaces",
"tests/repository_test.py::test_python_hook_weird_setup_cfg",
"tests/repository_test.py::test_versioned_python_hook",
"tests/repository_test.py::test_run_a_node_hook",
"tests/repository_test.py::test_run_versioned_node_hook",
"tests/repository_test.py::test_system_hook_with_spaces",
"tests/repository_test.py::test_missing_executable",
"tests/repository_test.py::test_run_a_script_hook",
"tests/repository_test.py::test_run_hook_with_spaced_args",
"tests/repository_test.py::test_run_hook_with_curly_braced_arguments",
"tests/repository_test.py::TestPygrep::test_grep_hook_matching",
"tests/repository_test.py::TestPygrep::test_grep_hook_case_insensitive",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[nope]",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[foo'bar]",
"tests/repository_test.py::TestPygrep::test_grep_hook_not_matching[^\\\\[INFO\\\\]]",
"tests/repository_test.py::TestPCRE::test_grep_hook_matching",
"tests/repository_test.py::TestPCRE::test_grep_hook_case_insensitive",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[nope]",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[foo'bar]",
"tests/repository_test.py::TestPCRE::test_grep_hook_not_matching[^\\\\[INFO\\\\]]",
"tests/repository_test.py::TestPCRE::test_pcre_hook_many_files",
"tests/repository_test.py::TestPCRE::test_missing_pcre_support",
"tests/repository_test.py::test_cwd_of_hook",
"tests/repository_test.py::test_lots_of_files",
"tests/repository_test.py::test_reinstall",
"tests/repository_test.py::test_really_long_file_paths",
"tests/repository_test.py::test_config_overrides_repo_specifics",
"tests/repository_test.py::test_tags_on_repositories",
"tests/repository_test.py::test_local_repository",
"tests/repository_test.py::test_local_python_repo",
"tests/repository_test.py::test_hook_id_not_present",
"tests/repository_test.py::test_meta_hook_not_present",
"tests/repository_test.py::test_too_new_version",
"tests/repository_test.py::test_versions_ok[0.1.0]",
"tests/repository_test.py::test_versions_ok[1.6.0]",
"tests/repository_test.py::test_manifest_hooks"
] | [] | MIT License | 2,207 | 1,765 | [
"pre_commit/commands/autoupdate.py",
"pre_commit/repository.py",
"pre_commit/store.py"
] |
|
akolar__ogn-lib-10 | 4526578e5fe7e897c6e0f08edfa180e75e88203f | 2018-02-26 13:37:55 | b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5 | diff --git a/ogn_lib/parser.py b/ogn_lib/parser.py
index f43f6cb..a2ca7ea 100644
--- a/ogn_lib/parser.py
+++ b/ogn_lib/parser.py
@@ -627,3 +627,33 @@ class ServerParser(Parser):
"""
return {'comment': comment}
+
+
+class Spot(Parser):
+ """
+ Parser for Spot-formatted APRS messages.
+ """
+
+ __destto__ = ['OGSPOT', 'OGSPOT-1']
+
+ @staticmethod
+ def _parse_protocol_specific(comment):
+ """
+ Parses the comment string from Spot's APRS messages.
+
+ :param str comment: comment string
+ :return: parsed comment
+ :rtype: dict
+ """
+
+ fields = comment.split(' ', maxsplit=2)
+
+ if len(fields) < 3:
+ raise exceptions.ParseError('SPOT comment incorrectly formatted: '
+ 'received {}'.format(comment))
+
+ return {
+ 'id': fields[0],
+ 'model': fields[1],
+ 'status': fields[2]
+ }
| Implement parser for Spot messages (OGSPOT) | akolar/ogn-lib | diff --git a/tests/test_parser.py b/tests/test_parser.py
index cb9b05c..bcd3247 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -440,6 +440,18 @@ class TestNaviter:
assert data['address_type'] is constants.AddressType.naviter
+class TestSpot:
+ def test_parse_protocol_specific(self):
+ data = parser.Spot._parse_protocol_specific('id0-2860357 SPOT3 GOOD')
+ assert data['id'] == 'id0-2860357'
+ assert data['model'] == 'SPOT3'
+ assert data['status'] == 'GOOD'
+
+ def test_parse_protocol_specific_fail(self):
+ with pytest.raises(exceptions.ParseError):
+ parser.Spot._parse_protocol_specific('id0-2860357 SPOT3')
+
+
class TestServerParser:
def test_parse_message_beacon(self, mocker):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/akolar/ogn-lib.git@4526578e5fe7e897c6e0f08edfa180e75e88203f#egg=ogn_lib
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ogn-lib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/ogn-lib
| [
"tests/test_parser.py::TestSpot::test_parse_protocol_specific",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific_fail"
] | [] | [
"tests/test_parser.py::TestParserBase::test_new_no_id",
"tests/test_parser.py::TestParserBase::test_new_single_id",
"tests/test_parser.py::TestParserBase::test_new_multi_id",
"tests/test_parser.py::TestParserBase::test_no_destto",
"tests/test_parser.py::TestParserBase::test_new_wrong_id",
"tests/test_parser.py::TestParserBase::test_set_default",
"tests/test_parser.py::TestParserBase::test_call_no_parser",
"tests/test_parser.py::TestParser::test_pattern_header",
"tests/test_parser.py::TestParser::test_pattern_header_matches_all",
"tests/test_parser.py::TestParser::test_pattern_location",
"tests/test_parser.py::TestParser::test_pattern_location_matches_all",
"tests/test_parser.py::TestParser::test_pattern_comment_common",
"tests/test_parser.py::TestParser::test_pattern_comment_common_matches_all",
"tests/test_parser.py::TestParser::test_pattern_all",
"tests/test_parser.py::TestParser::test_pattern_all_matches_all",
"tests/test_parser.py::TestParser::test_parse_msg_no_match",
"tests/test_parser.py::TestParser::test_parse_digipeaters",
"tests/test_parser.py::TestParser::test_parse_digipeaters_relayed",
"tests/test_parser.py::TestParser::test_parse_digipeaters_unknown_format",
"tests/test_parser.py::TestParser::test_parse_heading_speed",
"tests/test_parser.py::TestParser::test_parse_heading_speed_both_missing",
"tests/test_parser.py::TestParser::test_parse_heading_speed_null_input",
"tests/test_parser.py::TestParser::test_parse_altitude",
"tests/test_parser.py::TestParser::test_parse_altitude_missing",
"tests/test_parser.py::TestParser::test_parse_attrs",
"tests/test_parser.py::TestParser::test_parse_time_past",
"tests/test_parser.py::TestParser::test_parse_time_future",
"tests/test_parser.py::TestParser::test_parse_datetime",
"tests/test_parser.py::TestParser::test_parse_location_sign",
"tests/test_parser.py::TestParser::test_parse_location_value",
"tests/test_parser.py::TestParser::test_parse_protocol_specific",
"tests/test_parser.py::TestParser::test_get_location_update_func",
"tests/test_parser.py::TestParser::test_update_location_decimal_same",
"tests/test_parser.py::TestParser::test_update_location_decimal_positive",
"tests/test_parser.py::TestParser::test_update_location_decimal_negative",
"tests/test_parser.py::TestParser::test_update_data",
"tests/test_parser.py::TestParser::test_update_data_missing",
"tests/test_parser.py::TestAPRS::test_parse_protocol_specific",
"tests/test_parser.py::TestAPRS::test_parse_id_string",
"tests/test_parser.py::TestNaviter::test_parse_protocol_specific",
"tests/test_parser.py::TestNaviter::test_parse_id_string"
] | [] | MIT License | 2,211 | 269 | [
"ogn_lib/parser.py"
] |
|
akolar__ogn-lib-11 | 5ab4b003315931c1d1f1ac3a9e29532305aa5fff | 2018-02-26 13:39:09 | b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5 | diff --git a/ogn_lib/parser.py b/ogn_lib/parser.py
index a2ca7ea..00093ea 100644
--- a/ogn_lib/parser.py
+++ b/ogn_lib/parser.py
@@ -629,31 +629,32 @@ class ServerParser(Parser):
return {'comment': comment}
-class Spot(Parser):
+class Spider(Parser):
"""
- Parser for Spot-formatted APRS messages.
+ Parser for Spider-formatted APRS messages.
"""
- __destto__ = ['OGSPOT', 'OGSPOT-1']
+ __destto__ = ['OGSPID', 'OGSPID-1']
@staticmethod
def _parse_protocol_specific(comment):
"""
- Parses the comment string from Spot's APRS messages.
+ Parses the comment string from Spider's APRS messages.
:param str comment: comment string
:return: parsed comment
:rtype: dict
"""
- fields = comment.split(' ', maxsplit=2)
+ fields = comment.split(' ', maxsplit=3)
- if len(fields) < 3:
- raise exceptions.ParseError('SPOT comment incorrectly formatted: '
- 'received {}'.format(comment))
+ if len(fields) < 4:
+ raise exceptions.ParseError('Spider comment incorrectly formatted:'
+ ' received {}'.format(comment))
return {
'id': fields[0],
- 'model': fields[1],
- 'status': fields[2]
+ 'signal_strength': fields[1],
+ 'spider_id': fields[2],
+ 'gps_status': fields[3]
}
| Implement parser for Spider messages (OGSPID) | akolar/ogn-lib | diff --git a/tests/test_parser.py b/tests/test_parser.py
index bcd3247..f179511 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -442,14 +442,16 @@ class TestNaviter:
class TestSpot:
def test_parse_protocol_specific(self):
- data = parser.Spot._parse_protocol_specific('id0-2860357 SPOT3 GOOD')
- assert data['id'] == 'id0-2860357'
- assert data['model'] == 'SPOT3'
- assert data['status'] == 'GOOD'
+ data = parser.Spider._parse_protocol_specific('id300234010617040 +19dB'
+ ' LWE 3D')
+ assert data['id'] == 'id300234010617040'
+ assert data['signal_strength'] == '+19dB'
+ assert data['spider_id'] == 'LWE'
+ assert data['gps_status'] == '3D'
def test_parse_protocol_specific_fail(self):
with pytest.raises(exceptions.ParseError):
- parser.Spot._parse_protocol_specific('id0-2860357 SPOT3')
+ parser.Spider._parse_protocol_specific('id300234010617040 +19dB')
class TestServerParser:
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/akolar/ogn-lib.git@5ab4b003315931c1d1f1ac3a9e29532305aa5fff#egg=ogn_lib
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ogn-lib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/ogn-lib
| [
"tests/test_parser.py::TestSpot::test_parse_protocol_specific",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific_fail"
] | [] | [
"tests/test_parser.py::TestParserBase::test_new_no_id",
"tests/test_parser.py::TestParserBase::test_new_single_id",
"tests/test_parser.py::TestParserBase::test_new_multi_id",
"tests/test_parser.py::TestParserBase::test_no_destto",
"tests/test_parser.py::TestParserBase::test_new_wrong_id",
"tests/test_parser.py::TestParserBase::test_set_default",
"tests/test_parser.py::TestParserBase::test_call_no_parser",
"tests/test_parser.py::TestParser::test_pattern_header",
"tests/test_parser.py::TestParser::test_pattern_header_matches_all",
"tests/test_parser.py::TestParser::test_pattern_location",
"tests/test_parser.py::TestParser::test_pattern_location_matches_all",
"tests/test_parser.py::TestParser::test_pattern_comment_common",
"tests/test_parser.py::TestParser::test_pattern_comment_common_matches_all",
"tests/test_parser.py::TestParser::test_pattern_all",
"tests/test_parser.py::TestParser::test_pattern_all_matches_all",
"tests/test_parser.py::TestParser::test_parse_msg_no_match",
"tests/test_parser.py::TestParser::test_parse_digipeaters",
"tests/test_parser.py::TestParser::test_parse_digipeaters_relayed",
"tests/test_parser.py::TestParser::test_parse_digipeaters_unknown_format",
"tests/test_parser.py::TestParser::test_parse_heading_speed",
"tests/test_parser.py::TestParser::test_parse_heading_speed_both_missing",
"tests/test_parser.py::TestParser::test_parse_heading_speed_null_input",
"tests/test_parser.py::TestParser::test_parse_altitude",
"tests/test_parser.py::TestParser::test_parse_altitude_missing",
"tests/test_parser.py::TestParser::test_parse_attrs",
"tests/test_parser.py::TestParser::test_parse_time_past",
"tests/test_parser.py::TestParser::test_parse_time_future",
"tests/test_parser.py::TestParser::test_parse_datetime",
"tests/test_parser.py::TestParser::test_parse_location_sign",
"tests/test_parser.py::TestParser::test_parse_location_value",
"tests/test_parser.py::TestParser::test_parse_protocol_specific",
"tests/test_parser.py::TestParser::test_get_location_update_func",
"tests/test_parser.py::TestParser::test_update_location_decimal_same",
"tests/test_parser.py::TestParser::test_update_location_decimal_positive",
"tests/test_parser.py::TestParser::test_update_location_decimal_negative",
"tests/test_parser.py::TestParser::test_update_data",
"tests/test_parser.py::TestParser::test_update_data_missing",
"tests/test_parser.py::TestAPRS::test_parse_protocol_specific",
"tests/test_parser.py::TestAPRS::test_parse_id_string",
"tests/test_parser.py::TestNaviter::test_parse_protocol_specific",
"tests/test_parser.py::TestNaviter::test_parse_id_string"
] | [] | MIT License | 2,212 | 375 | [
"ogn_lib/parser.py"
] |
|
akolar__ogn-lib-13 | dd7b9bf33caee17a839240a134246881e9c7c32f | 2018-02-26 14:12:12 | b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5 | diff --git a/ogn_lib/parser.py b/ogn_lib/parser.py
index 4df961f..e50eb80 100644
--- a/ogn_lib/parser.py
+++ b/ogn_lib/parser.py
@@ -714,3 +714,32 @@ class Skylines(Parser):
'id': fields[0],
'vertical_speed': int(fields[1][:3]) * FEET_TO_METERS
}
+
+
+class LiveTrack24(Parser):
+ """
+ Parser for LiveTrack24-formatted APRS messages.
+ """
+
+ __destto__ = ['OGLT24', 'OGLT24-1']
+
+ @staticmethod
+ def _parse_protocol_specific(comment):
+ """
+ Parses the comment string from LiveTrack24's APRS messages.
+ :param str comment: comment string
+ :return: parsed comment
+ :rtype: dict
+ """
+
+ fields = comment.split(' ', maxsplit=2)
+
+ if len(fields) < 3:
+ raise exceptions.ParseError('LT24 comment incorrectly formatted:'
+ ' received {}'.format(comment))
+
+ return {
+ 'id': fields[0],
+ 'vertical_speed': int(fields[1][:3]) * FEET_TO_METERS,
+ 'source': fields[2]
+ }
| Implement parser for LiveTrack24 messages (OGLT24) | akolar/ogn-lib | diff --git a/tests/test_parser.py b/tests/test_parser.py
index fa7c7cd..fb5ec8d 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -481,6 +481,23 @@ class TestSkylines:
parser.Skylines._parse_protocol_specific('id1111')
+class TestLT24:
+ def test_parse_protocol_specific(self):
+ data = parser.LiveTrack24._parse_protocol_specific('id25387 +000fpm GPS')
+ assert data['id'] == 'id25387'
+ assert data['vertical_speed'] == 0
+ assert data['source'] == 'GPS'
+
+ data = parser.LiveTrack24._parse_protocol_specific('id25387 +159fpm GPS')
+ assert data['id'] == 'id25387'
+ assert abs(data['vertical_speed'] - 4.57) < 0.1
+ assert data['source'] == 'GPS'
+
+ def test_parse_protocol_specific_fail(self):
+ with pytest.raises(exceptions.ParseError):
+ parser.LiveTrack24._parse_protocol_specific('id11111 GPS')
+
+
class TestServerParser:
def test_parse_message_beacon(self, mocker):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/akolar/ogn-lib.git@dd7b9bf33caee17a839240a134246881e9c7c32f#egg=ogn_lib
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-mock==3.6.1
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ogn-lib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- pytest-mock==3.6.1
prefix: /opt/conda/envs/ogn-lib
| [
"tests/test_parser.py::TestLT24::test_parse_protocol_specific",
"tests/test_parser.py::TestLT24::test_parse_protocol_specific_fail"
] | [] | [
"tests/test_parser.py::TestParserBase::test_new_no_id",
"tests/test_parser.py::TestParserBase::test_new_single_id",
"tests/test_parser.py::TestParserBase::test_new_multi_id",
"tests/test_parser.py::TestParserBase::test_no_destto",
"tests/test_parser.py::TestParserBase::test_new_wrong_id",
"tests/test_parser.py::TestParserBase::test_set_default",
"tests/test_parser.py::TestParserBase::test_call",
"tests/test_parser.py::TestParserBase::test_call_server",
"tests/test_parser.py::TestParserBase::test_call_no_parser",
"tests/test_parser.py::TestParserBase::test_call_default",
"tests/test_parser.py::TestParserBase::test_call_failed",
"tests/test_parser.py::TestParser::test_pattern_header",
"tests/test_parser.py::TestParser::test_pattern_header_matches_all",
"tests/test_parser.py::TestParser::test_pattern_location",
"tests/test_parser.py::TestParser::test_pattern_location_matches_all",
"tests/test_parser.py::TestParser::test_pattern_comment_common",
"tests/test_parser.py::TestParser::test_pattern_comment_common_matches_all",
"tests/test_parser.py::TestParser::test_pattern_all",
"tests/test_parser.py::TestParser::test_pattern_all_matches_all",
"tests/test_parser.py::TestParser::test_parse_msg_no_match",
"tests/test_parser.py::TestParser::test_parse_msg_calls",
"tests/test_parser.py::TestParser::test_parse_msg",
"tests/test_parser.py::TestParser::test_parse_msg_full",
"tests/test_parser.py::TestParser::test_parse_msg_delete_update",
"tests/test_parser.py::TestParser::test_parse_msg_comment",
"tests/test_parser.py::TestParser::test_parse_digipeaters",
"tests/test_parser.py::TestParser::test_parse_digipeaters_relayed",
"tests/test_parser.py::TestParser::test_parse_digipeaters_unknown_format",
"tests/test_parser.py::TestParser::test_parse_heading_speed",
"tests/test_parser.py::TestParser::test_parse_heading_speed_both_missing",
"tests/test_parser.py::TestParser::test_parse_heading_speed_null_input",
"tests/test_parser.py::TestParser::test_parse_altitude",
"tests/test_parser.py::TestParser::test_parse_altitude_missing",
"tests/test_parser.py::TestParser::test_parse_attrs",
"tests/test_parser.py::TestParser::test_parse_timestamp_h",
"tests/test_parser.py::TestParser::test_parse_timestamp_z",
"tests/test_parser.py::TestParser::test_parse_time_past",
"tests/test_parser.py::TestParser::test_parse_time_future",
"tests/test_parser.py::TestParser::test_parse_datetime",
"tests/test_parser.py::TestParser::test_parse_location_sign",
"tests/test_parser.py::TestParser::test_parse_location_value",
"tests/test_parser.py::TestParser::test_parse_protocol_specific",
"tests/test_parser.py::TestParser::test_get_location_update_func",
"tests/test_parser.py::TestParser::test_update_location_decimal_same",
"tests/test_parser.py::TestParser::test_update_location_decimal_positive",
"tests/test_parser.py::TestParser::test_update_location_decimal_negative",
"tests/test_parser.py::TestParser::test_call",
"tests/test_parser.py::TestParser::test_update_data",
"tests/test_parser.py::TestParser::test_update_data_missing",
"tests/test_parser.py::TestAPRS::test_parse_protocol_specific",
"tests/test_parser.py::TestAPRS::test_parse_id_string",
"tests/test_parser.py::TestNaviter::test_parse_protocol_specific",
"tests/test_parser.py::TestNaviter::test_parse_id_string",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestSpider::test_parse_protocol_specific",
"tests/test_parser.py::TestSpider::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestSkylines::test_parse_protocol_specific",
"tests/test_parser.py::TestSkylines::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestServerParser::test_parse_message_beacon",
"tests/test_parser.py::TestServerParser::test_parse_message_status",
"tests/test_parser.py::TestServerParser::test_parse_beacon_comment"
] | [] | MIT License | 2,214 | 313 | [
"ogn_lib/parser.py"
] |
|
akolar__ogn-lib-16 | b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5 | 2018-02-26 14:59:17 | b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5 | diff --git a/ogn_lib/parser.py b/ogn_lib/parser.py
index 0546bbb..4ae680a 100644
--- a/ogn_lib/parser.py
+++ b/ogn_lib/parser.py
@@ -153,6 +153,8 @@ class Parser(metaclass=ParserBase):
using Parser.PATTERN_ALL
"""
+ raw_message = cls._preprocess_message(raw_message)
+
match = cls.PATTERN_ALL.match(raw_message)
if not match:
@@ -187,6 +189,18 @@ class Parser(metaclass=ParserBase):
data['raw'] = raw_message
return data
+ @staticmethod
+ def _preprocess_message(message):
+ """
+ Performs additional preprocessing on the received APRS message.
+
+ :param str message: the received message
+ :return: processed message
+ :rtype: str
+ """
+
+ return message
+
@staticmethod
def _parse_digipeaters(digipeaters):
"""
@@ -756,3 +770,11 @@ class LiveTrack24(Parser):
'vertical_speed': Parser._convert_fpm_to_ms(fields[1]),
'source': fields[2]
}
+
+
+class Capturs(Parser):
+ __destto__ = ['OGLT24', 'OGLT24-1']
+
+ @staticmethod
+ def _preprocess_message(message):
+ return message.strip('/')
| Implement parser for Capturs messages (OGCAPT) | akolar/ogn-lib | diff --git a/tests/test_parser.py b/tests/test_parser.py
index 2b99868..c1c0a9c 100644
--- a/tests/test_parser.py
+++ b/tests/test_parser.py
@@ -177,11 +177,13 @@ class TestParser:
mocker.spy(parser.Parser, '_parse_digipeaters')
mocker.spy(parser.Parser, '_parse_heading_speed')
mocker.spy(parser.Parser, '_parse_protocol_specific')
+ mocker.spy(parser.Parser, '_preprocess_message')
- parser.Parser.parse_message(
- 'FLRDD83BC>APRS,qAS,EDLF:/163148h5124.56N/00634.42E\''
- '276/075/A=001551')
+ msg = ('FLRDD83BC>APRS,qAS,EDLF:/163148h5124.56N/00634.42E\''
+ '276/075/A=001551')
+ parser.Parser.parse_message(msg)
+ parser.Parser._preprocess_message.assert_called_once_with(msg)
parser.Parser._parse_timestamp.assert_called_once_with('163148h')
assert parser.Parser._parse_location.call_count == 2
parser.Parser._parse_altitude.assert_called_once_with('001551')
@@ -232,6 +234,10 @@ class TestParser:
assert data['comment']
+ def test_preprocess_message(self):
+ msg = 'asdf'
+ assert parser.Parser._preprocess_message(msg) == msg
+
def test_parse_digipeaters(self):
data = parser.Parser._parse_digipeaters('qAS,RECEIVER')
assert data == {
@@ -512,6 +518,19 @@ class TestLT24:
parser.LiveTrack24._parse_protocol_specific('id11111 GPS')
+class TestCapturs:
+ def test_process(self):
+ parser.Capturs.parse_message(
+ "FLRDDEEF1>OGCAPT,qAS,CAPTURS:/065144h4837.56N/00233.80E'000/000/")
+
+ def test_preprocess(self):
+ msg_original = ("FLRDDEEF1>OGCAPT,qAS,CAPTURS:/065144h4837.56N/"
+ "00233.80E'000/000/")
+ msg = parser.Capturs._preprocess_message(msg_original)
+
+ assert msg == msg_original[:-1]
+
+
class TestServerParser:
def test_parse_message_beacon(self, mocker):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 0.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-mock"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/akolar/ogn-lib.git@b2b444e1a990e6e84f09b76d93505c2bd9ed2bf5#egg=ogn_lib
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-mock==3.14.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: ogn-lib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- pytest-mock==3.14.0
prefix: /opt/conda/envs/ogn-lib
| [
"tests/test_parser.py::TestParser::test_parse_msg_calls",
"tests/test_parser.py::TestParser::test_preprocess_message",
"tests/test_parser.py::TestCapturs::test_process",
"tests/test_parser.py::TestCapturs::test_preprocess"
] | [] | [
"tests/test_parser.py::TestParserBase::test_new_no_id",
"tests/test_parser.py::TestParserBase::test_new_single_id",
"tests/test_parser.py::TestParserBase::test_new_multi_id",
"tests/test_parser.py::TestParserBase::test_no_destto",
"tests/test_parser.py::TestParserBase::test_new_wrong_id",
"tests/test_parser.py::TestParserBase::test_set_default",
"tests/test_parser.py::TestParserBase::test_call",
"tests/test_parser.py::TestParserBase::test_call_server",
"tests/test_parser.py::TestParserBase::test_call_no_parser",
"tests/test_parser.py::TestParserBase::test_call_default",
"tests/test_parser.py::TestParserBase::test_call_failed",
"tests/test_parser.py::TestParser::test_pattern_header",
"tests/test_parser.py::TestParser::test_pattern_header_matches_all",
"tests/test_parser.py::TestParser::test_pattern_location",
"tests/test_parser.py::TestParser::test_pattern_location_matches_all",
"tests/test_parser.py::TestParser::test_pattern_comment_common",
"tests/test_parser.py::TestParser::test_pattern_comment_common_matches_all",
"tests/test_parser.py::TestParser::test_pattern_all",
"tests/test_parser.py::TestParser::test_pattern_all_matches_all",
"tests/test_parser.py::TestParser::test_parse_msg_no_match",
"tests/test_parser.py::TestParser::test_parse_msg",
"tests/test_parser.py::TestParser::test_parse_msg_full",
"tests/test_parser.py::TestParser::test_parse_msg_delete_update",
"tests/test_parser.py::TestParser::test_parse_msg_comment",
"tests/test_parser.py::TestParser::test_parse_digipeaters",
"tests/test_parser.py::TestParser::test_parse_digipeaters_relayed",
"tests/test_parser.py::TestParser::test_parse_digipeaters_unknown_format",
"tests/test_parser.py::TestParser::test_parse_heading_speed",
"tests/test_parser.py::TestParser::test_parse_heading_speed_both_missing",
"tests/test_parser.py::TestParser::test_parse_heading_speed_null_input",
"tests/test_parser.py::TestParser::test_parse_altitude",
"tests/test_parser.py::TestParser::test_parse_altitude_missing",
"tests/test_parser.py::TestParser::test_parse_attrs",
"tests/test_parser.py::TestParser::test_parse_timestamp_h",
"tests/test_parser.py::TestParser::test_parse_timestamp_z",
"tests/test_parser.py::TestParser::test_parse_time_past",
"tests/test_parser.py::TestParser::test_parse_time_future",
"tests/test_parser.py::TestParser::test_parse_datetime",
"tests/test_parser.py::TestParser::test_parse_location_sign",
"tests/test_parser.py::TestParser::test_parse_location_value",
"tests/test_parser.py::TestParser::test_parse_protocol_specific",
"tests/test_parser.py::TestParser::test_conv_fpm_to_ms",
"tests/test_parser.py::TestParser::test_conv_fpm_to_ms_sign",
"tests/test_parser.py::TestParser::test_get_location_update_func",
"tests/test_parser.py::TestParser::test_update_location_decimal_same",
"tests/test_parser.py::TestParser::test_update_location_decimal_positive",
"tests/test_parser.py::TestParser::test_update_location_decimal_negative",
"tests/test_parser.py::TestParser::test_call",
"tests/test_parser.py::TestParser::test_update_data",
"tests/test_parser.py::TestParser::test_update_data_missing",
"tests/test_parser.py::TestAPRS::test_parse_protocol_specific",
"tests/test_parser.py::TestAPRS::test_parse_id_string",
"tests/test_parser.py::TestNaviter::test_parse_protocol_specific",
"tests/test_parser.py::TestNaviter::test_parse_id_string",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific",
"tests/test_parser.py::TestSpot::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestSpider::test_parse_protocol_specific",
"tests/test_parser.py::TestSpider::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestSkylines::test_parse_protocol_specific",
"tests/test_parser.py::TestSkylines::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestLT24::test_parse_protocol_specific",
"tests/test_parser.py::TestLT24::test_parse_protocol_specific_fail",
"tests/test_parser.py::TestServerParser::test_parse_message_beacon",
"tests/test_parser.py::TestServerParser::test_parse_message_status",
"tests/test_parser.py::TestServerParser::test_parse_beacon_comment"
] | [] | MIT License | 2,216 | 340 | [
"ogn_lib/parser.py"
] |
|
EdinburghGenomics__clarity_scripts-44 | 57d8c8da1958bf1f5769d385c5d679ab4b012294 | 2018-02-26 15:24:39 | 32c21fa719365176a9101a8a7ce72eb07f3ac85d | diff --git a/scripts/populate_review_step.py b/scripts/populate_review_step.py
index 3ba1948..9cccfd0 100644
--- a/scripts/populate_review_step.py
+++ b/scripts/populate_review_step.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
import datetime
+from egcg_core import util
from cached_property import cached_property
from EPPs.common import StepEPP, RestCommunicationEPP, step_argparser
from EPPs.config import load_config
@@ -18,8 +19,8 @@ class StepPopulator(StepEPP, RestCommunicationEPP):
if io[0]['uri'].samples[0].name == sample_name and io[1]['output-type'] == 'ResultFile'
]
- def check_rest_data_and_artifacts(self, sample_name, selector):
- query_args = {selector: {'sample_id': sample_name}}
+ def check_rest_data_and_artifacts(self, sample_name):
+ query_args = {'where': {'sample_id': sample_name}}
rest_entities = self.get_documents(self.endpoint, **query_args)
artifacts = self.output_artifacts_per_sample(sample_name=sample_name)
if len(rest_entities) != len(artifacts): # in sample review this will be 1, in run review this will be more
@@ -30,6 +31,18 @@ class StepPopulator(StepEPP, RestCommunicationEPP):
)
return rest_entities, artifacts
+ def delivered(self, sample_name):
+ d = {'yes': True, 'no': False}
+ query_args = {'where': {'sample_id': sample_name}}
+ sample = self.get_documents('samples', **query_args)[0]
+ return d.get(sample.get('delivered'))
+
+ def processed(self, sample_name):
+ query_args = {'where': {'sample_id': sample_name}}
+ sample = self.get_documents('samples', **query_args)[0]
+ processing_status = util.query_dict(sample, 'aggregated.most_recent_proc.status')
+ return processing_status == 'finished'
+
def _run(self):
raise NotImplementedError
@@ -51,7 +64,7 @@ class PullInfo(StepPopulator):
self.lims.put_batch(artifacts_to_upload)
def add_artifact_info(self, sample):
- rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name, 'match')
+ rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
artifacts_to_upload = set()
for i in range(len(rest_entities)):
for art_field, api_field in self.metrics_mapping:
@@ -83,15 +96,16 @@ class PullInfo(StepPopulator):
class PullRunElementInfo(PullInfo):
- endpoint = 'aggregate/run_elements'
+ endpoint = 'run_elements'
metrics_mapping = [
('RE Id', 'run_element_id'),
('RE Nb Reads', 'passing_filter_reads'),
- ('RE Yield', 'clean_yield_in_gb'),
- ('RE Yield Q30', 'clean_yield_q30_in_gb'),
- ('RE %Q30', 'clean_pc_q30'),
+ ('RE Yield', 'aggregated.clean_yield_in_gb'),
+ ('RE Yield Q30', 'aggregated.clean_yield_q30_in_gb'),
+ ('RE %Q30', 'aggregated.clean_pc_q30'),
+ ('RE Coverage', 'coverage.mean'),
('RE Estimated Duplicate Rate', 'lane_pc_optical_dups'),
- ('RE %Adapter', 'pc_adapter'),
+ ('RE %Adapter', 'aggregated.pc_adaptor'),
('RE Review status', 'reviewed'),
('RE Review Comment', 'review_comments'),
('RE Review date', 'review_date'),
@@ -102,7 +116,6 @@ class PullRunElementInfo(PullInfo):
def assess_sample(self, sample):
artifacts_to_upload = set()
-
artifacts = self.output_artifacts_per_sample(sample_name=sample.name)
un_reviewed_artifacts = [a for a in artifacts if a.udf.get('RE Review status') not in ['pass', 'fail']]
if un_reviewed_artifacts:
@@ -111,36 +124,69 @@ class PullRunElementInfo(PullInfo):
# Artifacts that pass the review
pass_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'pass']
-
# Artifacts that fail the review
fail_artifacts = [a for a in artifacts if a.udf.get('RE Review status') == 'fail']
+ # Artifacts that are new
+ new_artifacts = [a for a in artifacts if a.udf.get('RE previous Useable') not in ['yes', 'no']]
- target_yield = float(sample.udf.get('Yield for Quoted Coverage (Gb)'))
- good_re_yield = sum([float(a.udf.get('RE Yield Q30')) for a in pass_artifacts])
+ # skip samples which have been delivered, mark any new REs as such, not changing older RE comments
+ if self.delivered(sample.name):
+ for a in new_artifacts:
+ a.udf['RE Useable Comment'] = 'AR: Delivered'
+ a.udf['RE Useable'] = 'no'
- # Just the right amount of good yield: take it all
- if target_yield < good_re_yield < target_yield * 2:
- for a in pass_artifacts:
- a.udf['RE Useable'] = 'yes'
- a.udf['RE Useable Comment'] = 'AR: Good yield'
- for a in fail_artifacts:
+ for a in pass_artifacts + fail_artifacts:
+ if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
+ a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
+ a.udf['RE Useable'] = a.udf.get('RE previous Useable')
+
+ artifacts_to_upload.update(artifacts)
+ return artifacts_to_upload
+
+ # skip samples which have been processed, mark any new REs as such, not changing older RE comments
+ if self.processed(sample.name):
+ for a in pass_artifacts + fail_artifacts:
+ if a.udf.get('RE previous Useable Comment') and a.udf.get('RE previous Useable'):
+ a.udf['RE Useable Comment'] = a.udf.get('RE previous Useable Comment')
+ a.udf['RE Useable'] = a.udf.get('RE previous Useable')
+
+ for a in new_artifacts:
+ a.udf['RE Useable Comment'] = 'AR: Sample already processed'
a.udf['RE Useable'] = 'no'
- a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
+
artifacts_to_upload.update(artifacts)
+ return artifacts_to_upload
+
+ target_yield = float(sample.udf.get('Required Yield (Gb)'))
+ good_re_yield = sum([float(a.udf.get('RE Yield')) for a in pass_artifacts])
+
+ # Increase target coverage by 5% to resolve borderline cases
+ target_coverage = 1.05 * sample.udf.get('Coverage (X)')
+ obtained_coverage = float(sum([a.udf.get('RE Coverage') for a in pass_artifacts]))
# Too much good yield limit to the best quality ones
- elif good_re_yield > target_yield * 2:
+ if good_re_yield > target_yield * 2 and obtained_coverage > target_coverage:
# Too much yield: sort the good artifact by quality
pass_artifacts.sort(key=lambda x: x.udf.get('RE %Q30'), reverse=True)
current_yield = 0
for a in pass_artifacts:
- current_yield += float(a.udf.get('RE Yield Q30'))
+ current_yield += float(a.udf.get('RE Yield'))
if current_yield < target_yield * 2:
a.udf['RE Useable'] = 'yes'
a.udf['RE Useable Comment'] = 'AR: Good yield'
else:
a.udf['RE Useable'] = 'no'
- a.udf['RE Useable Comment'] = 'AR: To much good yield'
+ a.udf['RE Useable Comment'] = 'AR: Too much good yield'
+ for a in fail_artifacts:
+ a.udf['RE Useable'] = 'no'
+ a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
+ artifacts_to_upload.update(artifacts)
+
+ # Just the right amount of good yield: take it all
+ elif target_yield < good_re_yield < target_yield * 2 or obtained_coverage > target_coverage:
+ for a in pass_artifacts:
+ a.udf['RE Useable'] = 'yes'
+ a.udf['RE Useable Comment'] = 'AR: Good yield'
for a in fail_artifacts:
a.udf['RE Useable'] = 'no'
a.udf['RE Useable Comment'] = 'AR: Failed and not needed'
@@ -153,16 +199,16 @@ class PullRunElementInfo(PullInfo):
class PullSampleInfo(PullInfo):
- endpoint = 'aggregate/samples'
+ endpoint = 'samples'
metrics_mapping = [
- ('SR Yield (Gb)', 'clean_yield_in_gb'),
- ('SR %Q30', 'clean_pc_q30'),
- ('SR % Mapped', 'pc_mapped_reads'),
- ('SR % Duplicates', 'pc_duplicate_reads'),
- ('SR Mean Coverage', 'coverage.mean'),
- ('SR Species Found', 'species_contamination'),
- ('SR Sex Check Match', 'gender_match'),
- ('SR Genotyping Match', 'genotype_match'),
+ ('SR Yield (Gb)', 'aggregated.clean_yield_in_gb'),
+ ('SR %Q30', 'aggregated.clean_pc_q30'),
+ ('SR % Mapped', 'aggregated.pc_mapped_reads'),
+ ('SR % Duplicates', 'aggregated.pc_duplicate_reads'),
+ ('SR Mean Coverage', 'aggregated.mean_coverage'),
+ ('SR Species Found', 'matching_species'),
+ ('SR Sex Check Match', 'aggregated.gender_match'),
+ ('SR Genotyping Match', 'aggregated.genotype_match'),
('SR Freemix', 'sample_contamination.freemix'),
('SR Review Status', 'reviewed'),
('SR Review Comments', 'review_comments'),
@@ -192,9 +238,9 @@ class PullSampleInfo(PullInfo):
def field_from_entity(self, entity, api_field):
# TODO: remove once Rest API has a sensible field for species found
- if api_field == 'species_contamination':
- species = entity[api_field]['contaminant_unique_mapped']
- return ', '.join(k for k in sorted(species) if species[k] > 500)
+ if api_field == 'matching_species':
+ species = entity[api_field]
+ return ', '.join(species)
return super().field_from_entity(entity, api_field)
@@ -214,7 +260,7 @@ class PushInfo(StepPopulator):
_ = self.output_artifacts
for sample in self.samples:
self.info('Pushing data for sample %s', sample.name)
- rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name, 'where')
+ rest_entities, artifacts = self.check_rest_data_and_artifacts(sample.name)
rest_api_data = {}
for e in rest_entities:
rest_api_data[e[self.api_id_field]] = e
| Add new rules to sample assessment in Run review
The sample assessment is using the require yield Q30 instead of the yield and %Q30 which makes it inaccurate in some cases. Change to use Yield and %Q30
- [ ] Add strategy to deal with sample that have been delivered already
- [ ] Add strategy to protect previous call when additional data is generated but not needed assuming the resulting coverage met requirement.
- [ ] Add strategy to take coverage into account.
| EdinburghGenomics/clarity_scripts | diff --git a/tests/test_populate_review_step.py b/tests/test_populate_review_step.py
index d1b2eea..6e6c8e0 100644
--- a/tests/test_populate_review_step.py
+++ b/tests/test_populate_review_step.py
@@ -1,7 +1,7 @@
from pyclarity_lims.entities import Artifact
from scripts import populate_review_step as p
from tests.test_common import TestEPP, NamedMock
-from unittest.mock import Mock, patch, PropertyMock
+from unittest.mock import Mock, patch, PropertyMock, call
class TestPopulator(TestEPP):
@@ -13,7 +13,7 @@ class TestPopulator(TestEPP):
self.epp_cls,
'samples',
new_callable=PropertyMock(
- return_value=[NamedMock(real_name='a_sample', udf={'Yield for Quoted Coverage (Gb)': 95})]
+ return_value=[NamedMock(real_name='a_sample', udf={'Required Yield (Gb)': 95, 'Coverage (X)': 30})]
)
)
self.patched_lims = patch.object(self.epp_cls, 'lims', new_callable=PropertyMock)
@@ -30,37 +30,49 @@ class TestPopulator(TestEPP):
class TestPullRunElementInfo(TestPopulator):
epp_cls = p.PullRunElementInfo
fake_rest_entity = {
+ 'aggregated': {'clean_yield_in_gb': 20,
+ 'clean_yield_q30_in_gb': 15,
+ 'clean_pc_q30': 75,
+ 'pc_adaptor': 1.2},
'run_element_id': 'id',
'passing_filter_reads': 120000000,
- 'clean_yield_in_gb': 20,
- 'clean_yield_q30_in_gb': 15,
- 'clean_pc_q30': 75,
'lane_pc_optical_dups': 10,
- 'pc_adapter': 1.2,
'reviewed': 'pass',
'review_comments': 'alright',
- 'review_date': '12_02_2107_12:43:24'
+ 'review_date': '12_02_2107_12:43:24',
}
expected_udfs = {
'RE Id': 'id',
'RE Nb Reads': 120000000,
'RE Yield': 20,
'RE Yield Q30': 15,
+ 'RE Coverage': 34.2,
'RE %Q30': 75,
'RE Estimated Duplicate Rate': 10,
'RE %Adapter': 1.2,
'RE Review status': 'pass',
'RE Review Comment': 'alright',
- 'RE Review date': '2107-02-12'
+ 'RE Review date': '2107-02-12',
+ 'RE Useable': 'yes',
+ 'RE Useable Comment': 'AR: Good yield'
}
def test_pull(self):
+
+ patched_output_artifacts_per_sample = patch.object(
+ self.epp_cls,
+ 'output_artifacts_per_sample',
+ return_value=[Mock(spec=Artifact, udf={'RE Coverage': 34.2}, samples=[NamedMock(real_name='a_sample')])]
+ )
+
with self.patched_lims as pl, self.patched_samples, self.patched_get_docs as pg, \
- self.patched_output_artifacts_per_sample as poa:
+ patched_output_artifacts_per_sample as poa:
self.epp.run()
- assert pg.call_count == 1
- pg.assert_called_with(self.epp.endpoint, match={'sample_id': 'a_sample'})
+ assert pg.call_count == 3
+ assert pg.call_args_list == [call('run_elements', where={'sample_id': 'a_sample'}),
+ call('samples', where={'sample_id': 'a_sample'}),
+ call('samples', where={'sample_id': 'a_sample'})]
# Check that the udfs have been added
assert dict(poa.return_value[0].udf) == self.expected_udfs
@@ -72,16 +84,16 @@ class TestPullRunElementInfo(TestPopulator):
def patch_output_artifact(output_artifacts):
return patch.object(self.epp_cls, 'output_artifacts_per_sample', return_value=output_artifacts)
- sample = NamedMock(real_name='a_sample', udf={'Yield for Quoted Coverage (Gb)': 95})
+ sample = NamedMock(real_name='a_sample', udf={'Required Yield (Gb)': 95, 'Coverage (X)': 30})
patched_output_artifacts_per_sample = patch_output_artifact([
- Mock(spec=Artifact, udf={'RE Yield Q30': 115, 'RE %Q30': 75, 'RE Review status': 'pass'}),
- Mock(spec=Artifact, udf={'RE Yield Q30': 95, 'RE %Q30': 85, 'RE Review status': 'pass'}),
- Mock(spec=Artifact, udf={'RE Yield Q30': 15, 'RE %Q30': 70, 'RE Review status': 'fail'}),
+ Mock(spec=Artifact, udf={'RE Yield': 115, 'RE %Q30': 75, 'RE Review status': 'pass', 'RE Coverage': 35.2}),
+ Mock(spec=Artifact, udf={'RE Yield': 95, 'RE %Q30': 85, 'RE Review status': 'pass', 'RE Coverage': 36.7}),
+ Mock(spec=Artifact, udf={'RE Yield': 15, 'RE %Q30': 70, 'RE Review status': 'fail', 'RE Coverage': 34.1}),
])
- with patched_output_artifacts_per_sample as poa:
+ with patched_output_artifacts_per_sample as poa, self.patched_get_docs as pg:
self.epp.assess_sample(sample)
assert poa.return_value[0].udf['RE Useable'] == 'no'
- assert poa.return_value[0].udf['RE Useable Comment'] == 'AR: To much good yield'
+ assert poa.return_value[0].udf['RE Useable Comment'] == 'AR: Too much good yield'
assert poa.return_value[1].udf['RE Useable'] == 'yes'
assert poa.return_value[1].udf['RE Useable Comment'] == 'AR: Good yield'
@@ -90,38 +102,61 @@ class TestPullRunElementInfo(TestPopulator):
assert poa.return_value[2].udf['RE Useable Comment'] == 'AR: Failed and not needed'
patched_output_artifacts_per_sample = patch_output_artifact([
- Mock(spec=Artifact, udf={'RE Yield Q30': 115, 'RE %Q30': 85, 'RE Review status': 'pass'}),
- Mock(spec=Artifact, udf={'RE Yield Q30': 15, 'RE %Q30': 70, 'RE Review status': 'fail'}),
+ Mock(spec=Artifact, udf={'RE Yield': 115, 'RE %Q30': 85, 'RE Review status': 'pass', 'RE Coverage': 35.2}),
+ Mock(spec=Artifact, udf={'RE Yield': 15, 'RE %Q30': 70, 'RE Review status': 'fail', 'RE Coverage': 33.6}),
])
- with patched_output_artifacts_per_sample as poa:
+ with patched_output_artifacts_per_sample as poa, self.patched_get_docs as pg:
self.epp.assess_sample(sample)
assert poa.return_value[0].udf['RE Useable'] == 'yes'
assert poa.return_value[0].udf['RE Useable Comment'] == 'AR: Good yield'
+
assert poa.return_value[1].udf['RE Useable'] == 'no'
assert poa.return_value[1].udf['RE Useable Comment'] == 'AR: Failed and not needed'
+ patched_output_artifacts_per_sample = patch_output_artifact([
+ Mock(spec=Artifact, udf={'RE Yield': 115, 'RE %Q30': 85, 'RE Review status': 'pass', 'RE Coverage': 35.2}),
+ Mock(spec=Artifact, udf={'RE Yield': 15, 'RE %Q30': 70, 'RE Review status': 'fail', 'RE Coverage': 33.6}),
+ ])
+
+ delivered = 'scripts.populate_review_step.PullRunElementInfo.delivered'
+ processed = 'scripts.populate_review_step.PullRunElementInfo.processed'
+ patched_delivered = patch(delivered, return_value=True)
+ pathed_processed = patch(processed, return_value=True)
+
+ with patched_output_artifacts_per_sample as poa, self.patched_get_docs as pg, patched_delivered:
+ self.epp.assess_sample(sample)
+ assert poa.return_value[0].udf['RE Useable'] == 'no'
+ assert poa.return_value[0].udf['RE Useable Comment'] == 'AR: Delivered'
+ assert poa.return_value[1].udf['RE Useable'] == 'no'
+ assert poa.return_value[1].udf['RE Useable Comment'] == 'AR: Delivered'
+
+ with patched_output_artifacts_per_sample as poa, self.patched_get_docs as pg, pathed_processed:
+ self.epp.assess_sample(sample)
+ assert poa.return_value[0].udf['RE Useable'] == 'no'
+ assert poa.return_value[0].udf['RE Useable Comment'] == 'AR: Sample already processed'
+ assert poa.return_value[1].udf['RE Useable'] == 'no'
+ assert poa.return_value[1].udf['RE Useable Comment'] == 'AR: Sample already processed'
+
def test_field_from_entity(self):
entity = {'this': {'that': 'other'}}
assert self.epp.field_from_entity(entity, 'this.that') == 'other'
assert entity == {'this': {'that': 'other'}} # not changed
-class TestPullSampleInfo(TestPullRunElementInfo):
+class TestPullSampleInfo(TestPopulator):
epp_cls = p.PullSampleInfo
fake_rest_entity = {
'sample_id': 'a_sample',
'user_sample_id': 'a_user_sample_id',
'clean_yield_in_gb': 5,
- 'clean_pc_q30': 70,
- 'pc_mapped_reads': 75,
- 'pc_duplicate_reads': 5,
- 'coverage': {'mean': 30},
- 'species_contamination': {
- 'contaminant_unique_mapped': {'Homo sapiens': 70000, 'Thingius thingy': 501, 'Sus scrofa': 499}
- },
- 'gender_match': 'Match',
- 'genotype_match': 'Match',
+ 'aggregated': {'clean_pc_q30': 70,
+ 'pc_mapped_reads': 75,
+ 'pc_duplicate_reads': 5,
+ 'mean_coverage': 30,
+ 'gender_match': 'Match',
+ 'genotype_match': 'Match'},
+ 'matching_species': ['Homo sapiens', 'Thingius thingy'],
'sample_contamination': {'freemix': 0.1},
'reviewed': 'pass',
'review_comments': 'alright',
@@ -162,7 +197,7 @@ class TestPullSampleInfo(TestPullRunElementInfo):
assert poa.return_value[1].udf['SR Useable Comments'] == 'AR: Review failed'
def test_field_from_entity(self):
- obs = self.epp.field_from_entity(self.fake_rest_entity, 'species_contamination')
+ obs = self.epp.field_from_entity(self.fake_rest_entity, 'matching_species')
assert obs == 'Homo sapiens, Thingius thingy'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asana==0.6.7
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
-e git+https://github.com/EdinburghGenomics/clarity_scripts.git@57d8c8da1958bf1f5769d385c5d679ab4b012294#egg=clarity_scripts
EGCG-Core==0.8.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==2.8
MarkupSafe==2.0.1
oauthlib==3.2.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyclarity-lims==0.4.8
pyparsing==3.1.4
pytest==7.0.1
PyYAML==6.0.1
requests==2.14.2
requests-oauthlib==0.8.0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: clarity_scripts
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asana==0.6.7
- attrs==22.2.0
- cached-property==1.5.2
- egcg-core==0.8.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==2.8
- markupsafe==2.0.1
- oauthlib==3.2.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyclarity-lims==0.4.8
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==6.0.1
- requests==2.14.2
- requests-oauthlib==0.8.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/clarity_scripts
| [
"tests/test_populate_review_step.py::TestPullRunElementInfo::test_assess_sample",
"tests/test_populate_review_step.py::TestPullRunElementInfo::test_pull",
"tests/test_populate_review_step.py::TestPullSampleInfo::test_field_from_entity"
] | [] | [
"tests/test_populate_review_step.py::TestEPP::test_init",
"tests/test_populate_review_step.py::TestPopulator::test_init",
"tests/test_populate_review_step.py::TestPullRunElementInfo::test_field_from_entity",
"tests/test_populate_review_step.py::TestPullRunElementInfo::test_init",
"tests/test_populate_review_step.py::TestPullSampleInfo::test_assess_sample",
"tests/test_populate_review_step.py::TestPullSampleInfo::test_init",
"tests/test_populate_review_step.py::TestPushRunElementInfo::test_init",
"tests/test_populate_review_step.py::TestPushRunElementInfo::test_push",
"tests/test_populate_review_step.py::TestPushSampleInfo::test_init",
"tests/test_populate_review_step.py::TestPushSampleInfo::test_push"
] | [] | MIT License | 2,217 | 2,636 | [
"scripts/populate_review_step.py"
] |
|
discos__simulators-125 | 8f491d8dda1a217d9233b0f4680e11c52dafaa2a | 2018-02-26 21:26:14 | 8f491d8dda1a217d9233b0f4680e11c52dafaa2a | coveralls:
[](https://coveralls.io/builds/15706170)
Coverage increased (+0.002%) to 98.68% when pulling **f70ca7456ba0dacfd653fcdaf2fd2dea8abfe113 on fix-issue-124** into **8f491d8dda1a217d9233b0f4680e11c52dafaa2a on master**.
| diff --git a/simulators/active_surface.py b/simulators/active_surface.py
index 5bea129..afd751a 100644
--- a/simulators/active_surface.py
+++ b/simulators/active_surface.py
@@ -579,7 +579,7 @@ class System(ListeningSystem):
self.drivers[params[0]].current_position
)
- val = utils.binary_to_bytes(binary_position)
+ val = utils.binary_to_bytes(binary_position, little_endian=False)
if params[1] == 0xFA:
retval += val
@@ -642,7 +642,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- frequency = utils.bytes_to_int([chr(x) for x in params[2]])
+ frequency = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if frequency >= 20 and frequency <= 10000:
if params[0] == -1:
@@ -669,7 +672,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- frequency = utils.bytes_to_int([chr(x) for x in params[2]])
+ frequency = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if frequency >= 20 and frequency <= 10000:
if params[0] == -1:
@@ -714,7 +720,8 @@ class System(ListeningSystem):
return self.byte_nak
else:
reference_position = utils.bytes_to_int(
- [chr(x) for x in params[2]]
+ [chr(x) for x in params[2]],
+ little_endian=False
)
if params[0] == -1:
@@ -811,7 +818,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- absolute_position = utils.bytes_to_int([chr(x) for x in params[2]])
+ absolute_position = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if params[0] == -1:
for driver in self.drivers:
@@ -829,7 +839,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- relative_position = utils.bytes_to_int([chr(x) for x in params[2]])
+ relative_position = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if params[0] == -1:
for driver in self.drivers:
@@ -867,7 +880,10 @@ class System(ListeningSystem):
else:
return self.byte_nak
else:
- velocity = utils.bytes_to_int([chr(x) for x in params[2]])
+ velocity = utils.bytes_to_int(
+ [chr(x) for x in params[2]],
+ little_endian=False
+ )
if velocity > 100000 or velocity < -100000:
if params[0] == -1:
diff --git a/simulators/acu.py b/simulators/acu.py
index 2beeb1a..6faa96e 100644
--- a/simulators/acu.py
+++ b/simulators/acu.py
@@ -17,8 +17,8 @@ from simulators.common import ListeningSystem, SendingSystem
servers = []
servers.append((('127.0.0.1', 13000), ('127.0.0.1', 13001), ()))
-start_flag = b'\x1D\xFC\xCF\x1A'
-end_flag = b'\xA1\xFC\xCF\xD1'
+start_flag = b'\x1A\xCF\xFC\x1D'
+end_flag = b'\xD1\xCF\xFC\xA1'
class System(ListeningSystem, SendingSystem):
@@ -80,7 +80,7 @@ class System(ListeningSystem, SendingSystem):
return False
if len(self.msg) == 8:
- self.msg_length = utils.bytes_to_int(self.msg[-4:])
+ self.msg_length = utils.bytes_to_uint(self.msg[-4:])
if len(self.msg) == 12:
macro_cmd_counter = utils.bytes_to_uint(self.msg[-4:])
diff --git a/simulators/acu_status/acu_utils.py b/simulators/acu_status/acu_utils.py
index 4abd82e..8c46654 100644
--- a/simulators/acu_status/acu_utils.py
+++ b/simulators/acu_status/acu_utils.py
@@ -1,8 +1,8 @@
import time
from simulators import utils
-start_flag = b'\x1D\xFC\xCF\x1A'
-end_flag = b'\xA1\xFC\xCF\xD1'
+start_flag = b'\x1A\xCF\xFC\x1D'
+end_flag = b'\xD1\xCF\xFC\xA1'
class ModeCommand(object):
diff --git a/simulators/utils.py b/simulators/utils.py
index b839bd4..20c4ad4 100644
--- a/simulators/utils.py
+++ b/simulators/utils.py
@@ -121,10 +121,10 @@ def int_to_twos(val, n_bytes=4):
return ("{0:0>%s}" % n_bits).format(binary_string)
-def binary_to_bytes(binary_string):
+def binary_to_bytes(binary_string, little_endian=True):
"""Convert a binary string in a string of bytes.
- >>> binary_to_bytes('0110100001100101011011000110110001101111')
+ >>> binary_to_bytes('0110100001100101011011000110110001101111', False)
'\x68\x65\x6C\x6C\x6F'
"""
@@ -133,31 +133,37 @@ def binary_to_bytes(binary_string):
for i in range(0, len(binary_string), 8):
byte_string += chr(int(binary_string[i:i + 8], 2))
- return byte_string
+ return byte_string[::-1] if little_endian else byte_string
-def bytes_to_int(byte_string):
+def bytes_to_int(byte_string, little_endian=True):
"""Convert a string of bytes to an integer (like C atoi function).
- >>> bytes_to_int(b'hello')
+ >>> bytes_to_int(b'hello', False)
448378203247
"""
binary_string = ''
+ if little_endian:
+ byte_string = byte_string[::-1]
+
for char in byte_string:
binary_string += bin(ord(char))[2:].zfill(8)
return twos_to_int(binary_string)
-def bytes_to_uint(byte_string):
+def bytes_to_uint(byte_string, little_endian=True):
"""Convert a string of bytes to an unsigned integer.
- >>> bytes_to_uint(b'hi')
+ >>> bytes_to_uint(b'hi', little_endian=False)
26729
"""
binary_string = ''
+ if little_endian:
+ byte_string = byte_string[::-1]
+
for char in byte_string:
binary_string += bin(ord(char))[2:].zfill(8)
@@ -198,32 +204,35 @@ def real_to_binary(num, precision=1):
)
-def real_to_bytes(num, precision=1):
+def real_to_bytes(num, precision=1, little_endian=True):
"""Return the bytestring representation of a floating-point number
(IEEE 754 standard).
- >>> [hex(ord(x)) for x in real_to_bytes(436.56, 1)]
+ >>> [hex(ord(x)) for x in real_to_bytes(436.56, 1, False)]
['0x43', '0xda', '0x47', '0xae']
- >>> [hex(ord(x)) for x in real_to_bytes(436.56, 2)]
+ >>> [hex(ord(x)) for x in real_to_bytes(436.56, 2, False)]
['0x40', '0x7b', '0x48', '0xf5', '0xc2', '0x8f', '0x5c', '0x29']
"""
binary_number = real_to_binary(num, precision)
- return binary_to_bytes(binary_number)
+ return binary_to_bytes(binary_number, little_endian=little_endian)
-def bytes_to_real(bytes_real, precision=1):
+def bytes_to_real(bytes_real, precision=1, little_endian=True):
"""Return the floating-point representation (IEEE 754 standard)
of bytestring number.
- >>> round(bytes_to_real('\x43\xDA\x47\xAE', 1), 2)
+ >>> round(bytes_to_real('\x43\xDA\x47\xAE', 1, False), 2)
436.56
- >>> round(bytes_to_real('\x40\x7B\x48\xF5\xC2\x8F\x5C\x29', 2), 2)
+ >>> round(bytes_to_real('\x40\x7B\x48\xF5\xC2\x8F\x5C\x29', 2, False), 2)
436.56
"""
+ if little_endian:
+ bytes_real = bytes_real[::-1]
+
if precision == 1:
return struct.unpack('!f', bytes_real)[0]
elif precision == 2:
@@ -235,20 +244,20 @@ def bytes_to_real(bytes_real, precision=1):
)
-def int_to_bytes(val, n_bytes=4):
+def int_to_bytes(val, n_bytes=4, little_endian=True):
"""Return the bytestring representation of a given signed integer.
- >>> [hex(ord(x)) for x in int_to_bytes(354)]
+ >>> [hex(ord(x)) for x in int_to_bytes(354, little_endian=False)]
['0x0', '0x0', '0x1', '0x62']
"""
- return binary_to_bytes(int_to_twos(val, n_bytes))
+ return binary_to_bytes(int_to_twos(val, n_bytes), little_endian)
-def uint_to_bytes(val, n_bytes=4):
+def uint_to_bytes(val, n_bytes=4, little_endian=True):
"""Return the bytestring representation of a given unsigned integer.
- >>> [hex(ord(x)) for x in uint_to_bytes(657)]
+ >>> [hex(ord(x)) for x in uint_to_bytes(657, little_endian=False)]
['0x0', '0x0', '0x2', '0x91']
"""
@@ -262,7 +271,10 @@ def uint_to_bytes(val, n_bytes=4):
% (val, min_range, max_range)
)
- return binary_to_bytes(bin(val)[2:].zfill(n_bytes * 8))
+ return binary_to_bytes(
+ bin(val)[2:].zfill(n_bytes * 8),
+ little_endian=little_endian
+ )
def sign(number):
| ACU commands and status use little-endian byte order, the current implementation uses big-endian. | discos/simulators | diff --git a/tests/test_acu.py b/tests/test_acu.py
index 674856f..f305c94 100644
--- a/tests/test_acu.py
+++ b/tests/test_acu.py
@@ -20,7 +20,7 @@ class TestACU(unittest.TestCase):
def test_status_message_length(self):
status = self.system.get_message()
- msg_length = utils.bytes_to_int(status[4:8])
+ msg_length = utils.bytes_to_uint(status[4:8])
self.assertEqual(msg_length, 813)
def test_duplicated_macro_command_counter(self):
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 86f8db5..6763dbe 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -68,7 +68,7 @@ class TestServer(unittest.TestCase):
def test_right_binary_to_bytes(self):
"""Convert a binary string into a string of bytes."""
binary_string = '00000101000110100010100011010010'
- byte_string = utils.binary_to_bytes(binary_string)
+ byte_string = utils.binary_to_bytes(binary_string, little_endian=False)
expected_byte_string = b'\x05\x1A\x28\xD2'
self.assertEqual(byte_string, expected_byte_string)
@@ -81,7 +81,7 @@ class TestServer(unittest.TestCase):
def test_right_bytes_to_int(self):
"""Convert a string of bytes into an integer (like C atoi function)."""
byte_string = b'\x00\x00\xFA\xFF'
- result = utils.bytes_to_int(byte_string)
+ result = utils.bytes_to_int(byte_string, little_endian=False)
expected_result = 64255
self.assertEqual(result, expected_result)
@@ -137,13 +137,13 @@ class TestServer(unittest.TestCase):
def test_real_to_bytes_single_precision(self):
"""Convert a real number to a string of bytes."""
number = 45.12371938725634
- result = utils.real_to_bytes(number)
+ result = utils.real_to_bytes(number, little_endian=False)
expected_result = b'\x42\x34\x7E\xB0'
self.assertEqual(result, expected_result)
def test_real_to_bytes_double_precision(self):
number = 3.14159265358979323846264338327950288419716939937510582097494
- result = utils.real_to_bytes(number, 2)
+ result = utils.real_to_bytes(number, precision=2, little_endian=False)
expected_result = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
self.assertEqual(result, expected_result)
@@ -155,13 +155,17 @@ class TestServer(unittest.TestCase):
def test_bytes_to_real_single_precision(self):
"""Convert a string of bytes to a floating point number."""
byte_string = b'\x42\x34\x7E\xB0'
- result = utils.bytes_to_real(byte_string)
+ result = utils.bytes_to_real(byte_string, little_endian=False)
expected_result = 45.12371826171875
self.assertEqual(result, expected_result)
def test_bytes_to_real_double_precision(self):
byte_string = b'\x40\x09\x21\xFB\x54\x44\x2D\x18'
- result = utils.bytes_to_real(byte_string, 2)
+ result = utils.bytes_to_real(
+ byte_string,
+ precision=2,
+ little_endian=False
+ )
expected_result = (
3.14159265358979323846264338327950288419716939937510582097494
)
@@ -175,13 +179,13 @@ class TestServer(unittest.TestCase):
def test_int_to_bytes_positive(self):
"""Convert a signed integer to a string of bytes."""
number = 232144
- result = utils.int_to_bytes(number)
+ result = utils.int_to_bytes(number, little_endian=False)
expected_result = b'\x00\x03\x8A\xD0'
self.assertEqual(result, expected_result)
def test_int_to_bytes_negative(self):
number = -4522764
- result = utils.int_to_bytes(number)
+ result = utils.int_to_bytes(number, little_endian=False)
expected_result = b'\xFF\xBA\xFC\xF4'
self.assertEqual(result, expected_result)
@@ -199,7 +203,7 @@ class TestServer(unittest.TestCase):
def test_uint_to_bytes(self):
"""Convert an unsigned integer to a string of bytes."""
number = 1284639736
- result = utils.uint_to_bytes(number)
+ result = utils.uint_to_bytes(number, little_endian=False)
expected_result = b'\x4C\x92\x0B\xF8'
self.assertEqual(result, expected_result)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"numpy>=1.16.0",
"pandas>=1.0.0"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/discos/simulators.git@8f491d8dda1a217d9233b0f4680e11c52dafaa2a#egg=discos_simulators
exceptiongroup==1.2.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.13.1
six==1.17.0
tomli==2.2.1
tzdata==2025.2
| name: simulators
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- tzdata==2025.2
prefix: /opt/conda/envs/simulators
| [
"tests/test_utils.py::TestServer::test_bytes_to_real_double_precision",
"tests/test_utils.py::TestServer::test_bytes_to_real_single_precision"
] | [
"tests/test_acu.py::TestACU::test_duplicated_macro_command_counter",
"tests/test_acu.py::TestACU::test_mode_command_active",
"tests/test_acu.py::TestACU::test_mode_command_azimuth",
"tests/test_acu.py::TestACU::test_mode_command_drive_to_stow",
"tests/test_acu.py::TestACU::test_mode_command_drive_to_stow_wrong_parameters",
"tests/test_acu.py::TestACU::test_mode_command_elevation",
"tests/test_acu.py::TestACU::test_mode_command_inactive",
"tests/test_acu.py::TestACU::test_mode_command_interlock",
"tests/test_acu.py::TestACU::test_mode_command_preset_absolute",
"tests/test_acu.py::TestACU::test_mode_command_preset_absolute_wrong_parameters",
"tests/test_acu.py::TestACU::test_mode_command_preset_relative",
"tests/test_acu.py::TestACU::test_mode_command_preset_relative_wrong_parameters",
"tests/test_acu.py::TestACU::test_mode_command_program_track",
"tests/test_acu.py::TestACU::test_mode_command_reset",
"tests/test_acu.py::TestACU::test_mode_command_slew",
"tests/test_acu.py::TestACU::test_mode_command_slew_wrong_parameters",
"tests/test_acu.py::TestACU::test_mode_command_slew_zero_speed",
"tests/test_acu.py::TestACU::test_mode_command_stop",
"tests/test_acu.py::TestACU::test_mode_command_stow",
"tests/test_acu.py::TestACU::test_mode_command_stow_wrong_position",
"tests/test_acu.py::TestACU::test_mode_command_unknown_mode_id",
"tests/test_acu.py::TestACU::test_mode_command_unknown_subsystem",
"tests/test_acu.py::TestACU::test_mode_command_unstow",
"tests/test_acu.py::TestACU::test_mode_command_without_activate",
"tests/test_acu.py::TestACU::test_mode_command_wrong_state_active",
"tests/test_acu.py::TestACU::test_multiple_commands_wrong_count",
"tests/test_acu.py::TestACU::test_parse_correct_end_flag",
"tests/test_acu.py::TestACU::test_parse_wrong_end_flag",
"tests/test_acu.py::TestACU::test_parse_wrong_start_flag",
"tests/test_acu.py::TestACU::test_program_track_command_add_entries",
"tests/test_acu.py::TestACU::test_program_track_command_add_entries_empty_table",
"tests/test_acu.py::TestACU::test_program_track_command_add_entries_wrong_start_time",
"tests/test_acu.py::TestACU::test_program_track_command_load_new_table",
"tests/test_acu.py::TestACU::test_program_track_execution",
"tests/test_acu.py::TestACU::test_program_track_out_of_range_rate",
"tests/test_acu.py::TestACU::test_program_track_too_long_sequence",
"tests/test_acu.py::TestACU::test_program_track_too_short_sequence",
"tests/test_acu.py::TestACU::test_program_track_unknown_subsystem",
"tests/test_acu.py::TestACU::test_program_track_wrong_delta_time",
"tests/test_acu.py::TestACU::test_program_track_wrong_first_relative_time",
"tests/test_acu.py::TestACU::test_program_track_wrong_interpolation_mode",
"tests/test_acu.py::TestACU::test_program_track_wrong_load_mode",
"tests/test_acu.py::TestACU::test_program_track_wrong_parameter_id",
"tests/test_acu.py::TestACU::test_program_track_wrong_sequence_length",
"tests/test_acu.py::TestACU::test_program_track_wrong_subsequent_relative_time",
"tests/test_acu.py::TestACU::test_program_track_wrong_tracking_mode",
"tests/test_acu.py::TestACU::test_status_message_length",
"tests/test_acu.py::TestACU::test_unknown_command",
"tests/test_acu.py::TestACU::test_utils_get_command_counter",
"tests/test_utils.py::TestServer::test_day_milliseconds",
"tests/test_utils.py::TestServer::test_int_to_bytes_negative",
"tests/test_utils.py::TestServer::test_int_to_bytes_positive",
"tests/test_utils.py::TestServer::test_int_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_mjd_to_date",
"tests/test_utils.py::TestServer::test_mjd_to_date_old_date",
"tests/test_utils.py::TestServer::test_real_to_binary_double_precision",
"tests/test_utils.py::TestServer::test_real_to_binary_single_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_double_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_single_precision",
"tests/test_utils.py::TestServer::test_right_binary_to_bytes",
"tests/test_utils.py::TestServer::test_right_bytes_to_int",
"tests/test_utils.py::TestServer::test_right_bytes_to_uint",
"tests/test_utils.py::TestServer::test_sign_negative",
"tests/test_utils.py::TestServer::test_sign_positive",
"tests/test_utils.py::TestServer::test_sign_zero",
"tests/test_utils.py::TestServer::test_uint_to_bytes",
"tests/test_utils.py::TestServer::test_uint_to_bytes_wrong",
"tests/test_utils.py::TestServer::test_wrong_binary_to_bytes",
"tests/test_utils.py::TestServer::test_wrong_bytes_to_int",
"tests/test_utils.py::TestServer::test_wrong_bytes_to_uint",
"tests/test_utils.py::TestServer::test_wrong_datatype_sign",
"tests/test_utils.py::TestServer::test_wrong_real_to_binary"
] | [
"tests/test_acu.py::TestACU::test_duplicated_command_counter",
"tests/test_acu.py::TestACU::test_utils_macro_command_append",
"tests/test_acu.py::TestACU::test_utils_macro_command_wrong_type_init",
"tests/test_acu.py::TestACU::test_utils_program_track_command_wrong_entry",
"tests/test_acu.py::TestACU::test_utils_program_track_get_empty_table",
"tests/test_utils.py::TestServer::test_binary_complement_longer_mask",
"tests/test_utils.py::TestServer::test_binary_complement_shorter_equal_mask",
"tests/test_utils.py::TestServer::test_bytes_to_real_unknown_precision",
"tests/test_utils.py::TestServer::test_int_to_bytes_out_of_range",
"tests/test_utils.py::TestServer::test_int_to_twos",
"tests/test_utils.py::TestServer::test_mjd_given_date",
"tests/test_utils.py::TestServer::test_mjd_now",
"tests/test_utils.py::TestServer::test_mjd_old_date",
"tests/test_utils.py::TestServer::test_out_of_range_int_to_twos",
"tests/test_utils.py::TestServer::test_real_to_binary_unknown_precision",
"tests/test_utils.py::TestServer::test_real_to_bytes_unknown_precision",
"tests/test_utils.py::TestServer::test_right_binary_complement",
"tests/test_utils.py::TestServer::test_right_checksum",
"tests/test_utils.py::TestServer::test_right_twos_to_int",
"tests/test_utils.py::TestServer::test_uint_to_bytes_out_of_range",
"tests/test_utils.py::TestServer::test_wrong_binary_complement",
"tests/test_utils.py::TestServer::test_wrong_checksum",
"tests/test_utils.py::TestServer::test_wrong_twos_to_int",
"tests/test_utils.py::TestServer::test_wrong_type_binary_complement",
"tests/test_utils.py::TestServer::test_wrong_type_mask_binary_complement"
] | [] | null | 2,219 | 2,719 | [
"simulators/active_surface.py",
"simulators/acu.py",
"simulators/acu_status/acu_utils.py",
"simulators/utils.py"
] |
pika__pika-980 | 7f81bc721807c5416726016838398804c07effbd | 2018-02-26 22:59:43 | 7b6d7983db021ae4b84d08ea9cee4b8f960ada43 | vitaly-krugl: Ready for review | diff --git a/pika/adapters/blocking_connection.py b/pika/adapters/blocking_connection.py
index b1fb7dc..be1db63 100644
--- a/pika/adapters/blocking_connection.py
+++ b/pika/adapters/blocking_connection.py
@@ -542,22 +542,24 @@ class BlockingConnection(object):
self.add_timeout(0, user_callback)
- def _on_connection_blocked(self, user_callback, method_frame):
+ def _on_connection_blocked(self, user_callback, _impl, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
:param callable user_callback: callback passed to
`add_on_connection_blocked_callback`
+ :param SelectConnection _impl:
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
self._ready_events.append(
_ConnectionBlockedEvt(user_callback, method_frame))
- def _on_connection_unblocked(self, user_callback, method_frame):
+ def _on_connection_unblocked(self, user_callback, _impl, method_frame):
"""Handle Connection.Unblocked notification from RabbitMQ broker
:param callable user_callback: callback passed to
`add_on_connection_unblocked_callback`
+ :param SelectConnection _impl:
:param pika.frame.Method method_frame: method frame having `method`
member of type `pika.spec.Connection.Blocked`
"""
@@ -595,13 +597,14 @@ class BlockingConnection(object):
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback: Callback to call on `Connection.Blocked`,
- having the signature `callback(pika.frame.Method)`, where the
- method frame's `method` member is of type
- `pika.spec.Connection.Blocked`
+ having the signature `callback(connection, pika.frame.Method)`,
+ where connection is the `BlockingConnection` instance and the method
+ frame's `method` member is of type `pika.spec.Connection.Blocked`
"""
self._impl.add_on_connection_blocked_callback(
- functools.partial(self._on_connection_blocked, callback))
+ functools.partial(self._on_connection_blocked,
+ functools.partial(callback, self)))
def add_on_connection_unblocked_callback(self, callback):
"""Add a callback to be notified when RabbitMQ has sent a
@@ -609,14 +612,15 @@ class BlockingConnection(object):
to start publishing again. The callback will be passed the
`Connection.Unblocked` method frame.
- :param method callback: Callback to call on
- `Connection.Unblocked`, having the signature
- `callback(pika.frame.Method)`, where the method frame's
- `method` member is of type `pika.spec.Connection.Unblocked`
+ :param method callback: Callback to call on Connection.Unblocked`,
+ having the signature `callback(connection, pika.frame.Method)`,
+ where connection is the `BlockingConnection` instance and the method
+ frame's `method` member is of type `pika.spec.Connection.Unblocked`
"""
self._impl.add_on_connection_unblocked_callback(
- functools.partial(self._on_connection_unblocked, callback))
+ functools.partial(self._on_connection_unblocked,
+ functools.partial(callback, self)))
def add_timeout(self, deadline, callback):
"""Create a single-shot timer to fire after deadline seconds. Do not
diff --git a/pika/connection.py b/pika/connection.py
index c784cc2..25ec506 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -6,6 +6,7 @@ import ast
import sys
import collections
import copy
+import functools
import logging
import math
import numbers
@@ -1018,11 +1019,9 @@ class Connection(object):
# pylint: disable=R0201
ON_CONNECTION_BACKPRESSURE = '_on_connection_backpressure'
- ON_CONNECTION_BLOCKED = '_on_connection_blocked'
ON_CONNECTION_CLOSED = '_on_connection_closed'
ON_CONNECTION_ERROR = '_on_connection_error'
ON_CONNECTION_OPEN = '_on_connection_open'
- ON_CONNECTION_UNBLOCKED = '_on_connection_unblocked'
CONNECTION_CLOSED = 0
CONNECTION_INIT = 1
CONNECTION_PROTOCOL = 2
@@ -1146,19 +1145,21 @@ class Connection(object):
instead of relying on back pressure throttling. The callback
will be passed the ``Connection.Blocked`` method frame.
- TODO Also pass the connection as the callback's first arg
-
See also `ConnectionParameters.blocked_connection_timeout`.
:param method callback: Callback to call on `Connection.Blocked`,
- having the signature `callback(pika.frame.Method)`, where the
- method frame's `method` member is of type
+ having the signature `callback(connection, pika.frame.Method)`,
+ where the method frame's `method` member is of type
`pika.spec.Connection.Blocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
- self.callbacks.add(0, spec.Connection.Blocked, callback, False)
+
+ self.callbacks.add(0,
+ spec.Connection.Blocked,
+ functools.partial(callback, self),
+ one_shot=False)
def add_on_connection_unblocked_callback(self, callback):
"""Add a callback to be notified when RabbitMQ has sent a
@@ -1166,17 +1167,19 @@ class Connection(object):
to start publishing again. The callback will be passed the
``Connection.Unblocked`` method frame.
- TODO Also pass the connection as the callback's first arg
-
:param method callback: Callback to call on
`Connection.Unblocked`, having the signature
- `callback(pika.frame.Method)`, where the method frame's
+ `callback(connection, pika.frame.Method)`, where the method frame's
`method` member is of type `pika.spec.Connection.Unblocked`
"""
if not callable(callback):
raise TypeError('callback should be a function or method.')
- self.callbacks.add(0, spec.Connection.Unblocked, callback, False)
+
+ self.callbacks.add(0,
+ spec.Connection.Unblocked,
+ functools.partial(callback, self),
+ one_shot=False)
def add_on_open_callback(self, callback):
"""Add a callback notification when the connection has opened. The
@@ -1789,7 +1792,7 @@ class Connection(object):
self._on_terminate(InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT,
'Blocked connection timeout expired')
- def _on_connection_blocked(self, method_frame):
+ def _on_connection_blocked(self, _connection, method_frame):
"""Handle Connection.Blocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
@@ -1808,7 +1811,7 @@ class Connection(object):
self.params.blocked_connection_timeout,
self._on_blocked_connection_timeout)
- def _on_connection_unblocked(self, method_frame):
+ def _on_connection_unblocked(self, _connection, method_frame):
"""Handle Connection.Unblocked notification from RabbitMQ broker
:param pika.frame.Method method_frame: method frame having `method`
| Pass connection instance as first arg of callbacks for add_on_connection_blocked_callback and add_on_connection_unblocked_callback
Targeting 1.0.0 | pika/pika | diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index d27a625..0539596 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -445,9 +445,11 @@ class TestBlockedConnectionTimesOut(AsyncTestCase, AsyncAdapters): # pylint: di
def begin(self, channel):
# Simulate Connection.Blocked
- channel.connection._on_connection_blocked(pika.frame.Method(
- 0,
- pika.spec.Connection.Blocked('Testing blocked connection timeout')))
+ channel.connection._on_connection_blocked(
+ channel.connection,
+ pika.frame.Method(0,
+ pika.spec.Connection.Blocked(
+ 'Testing blocked connection timeout')))
def on_closed(self, connection, reply_code, reply_text):
"""called when the connection has finished closing"""
@@ -471,15 +473,16 @@ class TestBlockedConnectionUnblocks(AsyncTestCase, AsyncAdapters): # pylint: di
def begin(self, channel):
# Simulate Connection.Blocked
- channel.connection._on_connection_blocked(pika.frame.Method(
- 0,
- pika.spec.Connection.Blocked(
- 'Testing blocked connection unblocks')))
+ channel.connection._on_connection_blocked(
+ channel.connection,
+ pika.frame.Method(0,
+ pika.spec.Connection.Blocked(
+ 'Testing blocked connection unblocks')))
# Simulate Connection.Unblocked
- channel.connection._on_connection_unblocked(pika.frame.Method(
- 0,
- pika.spec.Connection.Unblocked()))
+ channel.connection._on_connection_unblocked(
+ channel.connection,
+ pika.frame.Method(0, pika.spec.Connection.Unblocked()))
# Schedule shutdown after blocked connection timeout would expire
channel.connection.add_timeout(0.005, self.on_cleanup_timer)
diff --git a/tests/acceptance/blocking_adapter_test.py b/tests/acceptance/blocking_adapter_test.py
index 5623922..03b4681 100644
--- a/tests/acceptance/blocking_adapter_test.py
+++ b/tests/acceptance/blocking_adapter_test.py
@@ -401,27 +401,34 @@ class TestConnectionRegisterForBlockAndUnblock(BlockingTestCaseBase):
# NOTE: I haven't figured out yet how to coerce RabbitMQ to emit
# Connection.Block and Connection.Unblock from the test, so we'll
- # just call the registration functions for now, to make sure that
- # registration doesn't crash
-
- connection.add_on_connection_blocked_callback(lambda frame: None)
+ # just call the registration functions for now and simulate incoming
+ # blocked/unblocked frames
blocked_buffer = []
- evt = blocking_connection._ConnectionBlockedEvt(
- lambda f: blocked_buffer.append("blocked"),
- pika.frame.Method(1, pika.spec.Connection.Blocked('reason')))
- repr(evt)
- evt.dispatch()
- self.assertEqual(blocked_buffer, ["blocked"])
+ connection.add_on_connection_blocked_callback(
+ lambda conn, frame: blocked_buffer.append((conn, frame)))
+ # Simulate dispatch of blocked connection
+ blocked_frame = pika.frame.Method(
+ 0,
+ pika.spec.Connection.Blocked('reason'))
+ connection._impl._process_frame(blocked_frame)
+ connection.sleep(0) # facilitate dispatch of pending events
+ self.assertEqual(len(blocked_buffer), 1)
+ conn, frame = blocked_buffer[0]
+ self.assertIs(conn, connection)
+ self.assertIs(frame, blocked_frame)
unblocked_buffer = []
- connection.add_on_connection_unblocked_callback(lambda frame: None)
- evt = blocking_connection._ConnectionUnblockedEvt(
- lambda f: unblocked_buffer.append("unblocked"),
- pika.frame.Method(1, pika.spec.Connection.Unblocked()))
- repr(evt)
- evt.dispatch()
- self.assertEqual(unblocked_buffer, ["unblocked"])
+ connection.add_on_connection_unblocked_callback(
+ lambda conn, frame: unblocked_buffer.append((conn, frame)))
+ # Simulate dispatch of unblocked connection
+ unblocked_frame = pika.frame.Method(0, pika.spec.Connection.Unblocked())
+ connection._impl._process_frame(unblocked_frame)
+ connection.sleep(0) # facilitate dispatch of pending events
+ self.assertEqual(len(unblocked_buffer), 1)
+ conn, frame = unblocked_buffer[0]
+ self.assertIs(conn, connection)
+ self.assertIs(frame, unblocked_frame)
class TestBlockedConnectionTimeout(BlockingTestCaseBase):
@@ -436,9 +443,11 @@ class TestBlockedConnectionTimeout(BlockingTestCaseBase):
# simulate it for now
# Simulate Connection.Blocked
- conn._impl._on_connection_blocked(pika.frame.Method(
- 0,
- pika.spec.Connection.Blocked('TestBlockedConnectionTimeout')))
+ conn._impl._on_connection_blocked(
+ conn._impl,
+ pika.frame.Method(
+ 0,
+ pika.spec.Connection.Blocked('TestBlockedConnectionTimeout')))
# Wait for connection teardown
with self.assertRaises(pika.exceptions.ConnectionClosed) as excCtx:
diff --git a/tests/unit/blocking_connection_tests.py b/tests/unit/blocking_connection_tests.py
index f303023..5fda321 100644
--- a/tests/unit/blocking_connection_tests.py
+++ b/tests/unit/blocking_connection_tests.py
@@ -15,6 +15,19 @@ import pika.channel
from pika.exceptions import AMQPConnectionError, ChannelClosed
+# Disable protected-access
+# pylint: disable=W0212
+
+# Disable missing-docstring
+# pylint: disable=C0111
+
+# Disable invalid-name
+# pylint: disable=C0103
+
+# Disable no-self-use
+# pylint: disable=R0201
+
+
class BlockingConnectionMockTemplate(blocking_connection.BlockingConnection):
pass
@@ -325,3 +338,25 @@ class BlockingConnectionTests(unittest.TestCase):
# and the raised error must then looks like:
self.assertEqual('Connection to 127.0.0.1:5672 failed: timeout',
str(ctx.exception))
+
+ def test_connection_blocked_evt(self):
+ blocked_buffer = []
+ frame = pika.frame.Method(0, pika.spec.Connection.Blocked('reason'))
+ evt = blocking_connection._ConnectionBlockedEvt(
+ blocked_buffer.append,
+ frame)
+ repr(evt)
+ evt.dispatch()
+ self.assertEqual(len(blocked_buffer), 1)
+ self.assertIs(blocked_buffer[0], frame)
+
+ def test_connection_unblocked_evt(self):
+ unblocked_buffer = []
+ frame = pika.frame.Method(0, pika.spec.Connection.Unblocked())
+ evt = blocking_connection._ConnectionUnblockedEvt(
+ unblocked_buffer.append,
+ frame)
+ repr(evt)
+ evt.dispatch()
+ self.assertEqual(len(unblocked_buffer), 1)
+ self.assertIs(unblocked_buffer[0], frame)
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py
index 38db1d7..9d99314 100644
--- a/tests/unit/connection_tests.py
+++ b/tests/unit/connection_tests.py
@@ -678,6 +678,36 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
if frame_type == frame.Heartbeat:
self.assertTrue(self.connection.heartbeat.received.called)
+ def test_add_on_connection_blocked_callback(self):
+ blocked_buffer = []
+ self.connection.add_on_connection_blocked_callback(
+ lambda conn, frame: blocked_buffer.append((conn, frame)))
+
+ # Simulate dispatch of blocked connection
+ blocked_frame = pika.frame.Method(
+ 0,
+ pika.spec.Connection.Blocked('reason'))
+ self.connection._process_frame(blocked_frame)
+
+ self.assertEqual(len(blocked_buffer), 1)
+ conn, frame = blocked_buffer[0]
+ self.assertIs(conn, self.connection)
+ self.assertIs(frame, blocked_frame)
+
+ def test_add_on_connection_unblocked_callback(self):
+ unblocked_buffer = []
+ self.connection.add_on_connection_unblocked_callback(
+ lambda conn, frame: unblocked_buffer.append((conn, frame)))
+
+ # Simulate dispatch of unblocked connection
+ unblocked_frame = pika.frame.Method(0, pika.spec.Connection.Unblocked())
+ self.connection._process_frame(unblocked_frame)
+
+ self.assertEqual(len(unblocked_buffer), 1)
+ conn, frame = unblocked_buffer[0]
+ self.assertIs(conn, self.connection)
+ self.assertIs(frame, unblocked_frame)
+
@mock.patch.object(
connection.Connection,
'connect',
@@ -714,6 +744,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
blocked_connection_timeout=60))
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
@@ -736,6 +767,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Simulate Connection.Blocked trigger
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
# Check
@@ -748,6 +780,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Simulate Connection.Blocked trigger again
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertEqual(conn.add_timeout.call_count, 1)
@@ -770,6 +803,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
blocked_connection_timeout=60))
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
conn._on_blocked_connection_timeout()
@@ -798,6 +832,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
blocked_connection_timeout=60))
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
@@ -805,6 +840,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
timer = conn._blocked_conn_timer
conn._on_connection_unblocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
@@ -829,6 +865,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Simulate Connection.Blocked
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
@@ -837,6 +874,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Simulate Connection.Unblocked
conn._on_connection_unblocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
# Check
@@ -845,6 +883,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Simulate Connection.Unblocked again
conn._on_connection_unblocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Unblocked)'))
self.assertEqual(conn.remove_timeout.call_count, 1)
@@ -872,6 +911,7 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
blocked_connection_timeout=60))
conn._on_connection_blocked(
+ conn,
mock.Mock(name='frame.Method(Connection.Blocked)'))
self.assertIsNotNone(conn._blocked_conn_timer)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[tornado,twisted]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"yapf",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
mock==5.2.0
nose==1.3.7
packaging==21.3
-e git+https://github.com/pika/pika.git@7f81bc721807c5416726016838398804c07effbd#egg=pika
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
tornado==6.1
Twisted==15.3.0
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
zope.interface==5.5.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==5.2.0
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- tornado==6.1
- twisted==15.3.0
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_blocked_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_unblocked_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_blocked_in_a_row_sets_timer_once",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_unblocked_in_a_row_removes_timer_once",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_on_terminate_removes_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_timeout_teminates_connection",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_unblocked_removes_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_connection_blocked_sets_timer"
] | [
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestMultiCloseConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesOriginalException::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesSystemException::test",
"tests/acceptance/blocking_adapter_test.py::TestLostConnectionResultsInIsClosedConnectionAndChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestInvalidExchangeTypeRaisesConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnectionWithChannelAndConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestSuddenBrokerDisconnectBeforeChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAccessToFileDescriptorAfterConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionStart::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionTune::test",
"tests/acceptance/blocking_adapter_test.py::TestProcessDataEvents::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionRegisterForBlockAndUnblock::test",
"tests/acceptance/blocking_adapter_test.py::TestBlockedConnectionTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromSameThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddTimeoutRemoveTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestRemoveTimeoutFromTimeoutCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestSleep::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionProperties::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeBindAndUnbind::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestPassiveQueueDeclareOfUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueBindAndUnbindAndPurge::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicGet::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicReject::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRejectNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNack::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackMultiple::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRecoverWithRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestTxCommit::test",
"tests/acceptance/blocking_adapter_test.py::TestTxRollback::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeFromUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndBasicPublishWithPubacksUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestConfirmDeliveryAfterUnroutableMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessagesReturnedInNonPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessageReturnedInPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishDeliveredWhenPendingUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndConsumeWithPubacksAndQosOfOne::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestTwoBasicConsumersOnSameChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelPurgesPendingConsumerCancellationEvt::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishWithoutPubacks::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestStopConsumingFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseChannelFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseConnectionFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubAckPublishAndConsumeHugeMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubackPublishAndConsumeManyMessages::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithNonAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestUnackedMessageAutoRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAckMessageNotRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeInactivityTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorInterruptedByCancelFromBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorCancelEncountersCancelFromBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestChannelFlow::test"
] | [
"tests/acceptance/blocking_adapter_test.py::TestConnectWithDownedBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionProtocol::test",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_channel",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_close_with_channel_closed_exception",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_attempts_with_timeout",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_blocked_evt",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_connection_unblocked_evt",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_constructor",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_error_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_server_initiated_no_error_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_flush_output_user_initiated_close",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_process_io_for_connection_setup_fails_with_open_error",
"tests/unit/blocking_connection_tests.py::BlockingConnectionTests::test_sleep",
"tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closed_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closing_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_init_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_protocol_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_start_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_tune_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties_default",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties_override",
"tests/unit/connection_tests.py::ConnectionTests::test_close_bails_out_if_already_closed_or_closing",
"tests/unit/connection_tests.py::ConnectionTests::test_close_calls_on_close_ready_when_no_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_closes_opening_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_does_not_close_closing_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_connect_no_adapter_connect_from_constructor",
"tests/unit/connection_tests.py::ConnectionTests::test_create_with_blocked_connection_timeout_config",
"tests/unit/connection_tests.py::ConnectionTests::test_deliver_frame_to_channel_with_frame_for_unknown_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_with_closing_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connect_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connect_timer_reconnect",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune",
"tests/unit/connection_tests.py::ConnectionTests::test_on_data_available",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_cleans_up",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_access_denied_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_auth_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_connection_closed_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_on_terminate_invokes_protocol_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_send_message_updates_frames_sent_and_bytes_sent",
"tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,221 | 1,648 | [
"pika/adapters/blocking_connection.py",
"pika/connection.py"
] |
NeurodataWithoutBorders__pynwb-381 | a11f8e4edb84fa318b84035d0debe5c764bf7f31 | 2018-02-27 01:52:50 | f749097718cf344f4c95de7771a1ef523f26762f | diff --git a/src/pynwb/ogen.py b/src/pynwb/ogen.py
index d05832fd..a4a21f9c 100644
--- a/src/pynwb/ogen.py
+++ b/src/pynwb/ogen.py
@@ -5,6 +5,7 @@ from .form.utils import docval, popargs, fmt_docval_args
from . import register_class, CORE_NAMESPACE
from .base import TimeSeries, _default_resolution, _default_conversion
from .core import NWBContainer
+from .ecephys import Device
@register_class('OptogeneticStimulusSite', CORE_NAMESPACE)
@@ -19,7 +20,7 @@ class OptogeneticStimulusSite(NWBContainer):
@docval({'name': 'name', 'type': str, 'doc': 'The name of this stimulus site'},
{'name': 'source', 'type': str, 'doc': 'the source of the data'},
- {'name': 'device', 'type': str, 'doc': 'Name of device in /general/devices'},
+ {'name': 'device', 'type': Device, 'doc': 'the device that was used'},
{'name': 'description', 'type': str, 'doc': 'Description of site.'},
{'name': 'excitation_lambda', 'type': str, 'doc': 'Excitation wavelength.'},
{'name': 'location', 'type': str, 'doc': 'Location of stimulation site.'})
| `nwbfile.create_ogen_site` device argument expects string
the `device` argument of `nwbfile.create_ogen_site` takes a string that is the name of the device. I think it should also take the device object. This will make usage more consistent with `nwbfile.create_electrode_group` and `nwbfile.add_electrode` (with `electrode_group`) where the object is given, not the name of the object. Is there a reason the name of the device is used here? | NeurodataWithoutBorders/pynwb | diff --git a/tests/unit/pynwb_tests/test_ogen.py b/tests/unit/pynwb_tests/test_ogen.py
index bee99992..b80243d4 100644
--- a/tests/unit/pynwb_tests/test_ogen.py
+++ b/tests/unit/pynwb_tests/test_ogen.py
@@ -1,14 +1,16 @@
import unittest
from pynwb.ogen import OptogeneticSeries, OptogeneticStimulusSite
+from pynwb.ecephys import Device
class OptogeneticSeriesConstructor(unittest.TestCase):
def test_init(self):
- oS = OptogeneticStimulusSite('site1', 'a test source', 'device', 'description', 'excitation_lambda', 'location')
+ device = Device('name', 'source')
+ oS = OptogeneticStimulusSite('site1', 'a test source', device, 'description', 'excitation_lambda', 'location')
self.assertEqual(oS.name, 'site1')
- self.assertEqual(oS.device, 'device')
+ self.assertEqual(oS.device, device)
self.assertEqual(oS.description, 'description')
self.assertEqual(oS.excitation_lambda, 'excitation_lambda')
self.assertEqual(oS.location, 'location')
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio",
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2017.11.5
chardet==3.0.4
coverage==6.2
execnet==1.9.0
h5py==2.7.1
idna==2.6
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.14.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/NeurodataWithoutBorders/pynwb.git@a11f8e4edb84fa318b84035d0debe5c764bf7f31#egg=pynwb
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.6.1
requests==2.18.4
ruamel.yaml==0.15.35
six==1.11.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.22
zipp==3.6.0
| name: pynwb
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- certifi==2017.11.5
- chardet==3.0.4
- coverage==6.2
- execnet==1.9.0
- h5py==2.7.1
- idna==2.6
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.14.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.6.1
- requests==2.18.4
- ruamel-yaml==0.15.35
- six==1.11.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.22
- zipp==3.6.0
prefix: /opt/conda/envs/pynwb
| [
"tests/unit/pynwb_tests/test_ogen.py::OptogeneticSeriesConstructor::test_init"
] | [] | [] | [] | BSD-3-Clause | 2,222 | 334 | [
"src/pynwb/ogen.py"
] |
|
joblib__joblib-639 | d3e478f2822dde4181927b9efb6064cf00b92996 | 2018-02-27 16:54:52 | 902fb6bbcf75c461d1b6703e5a01605fc592f214 | codecov[bot]: # [Codecov](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=h1) Report
> Merging [#639](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=desc) into [master](https://codecov.io/gh/joblib/joblib/commit/a0e1f69d2be31e9e6be1f5e346988bc04df7ff75?src=pr&el=desc) will **increase** coverage by `0.44%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #639 +/- ##
==========================================
+ Coverage 94.52% 94.96% +0.44%
==========================================
Files 39 39
Lines 5403 5407 +4
==========================================
+ Hits 5107 5135 +28
+ Misses 296 272 -24
```
| [Impacted Files](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [joblib/memory.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL21lbW9yeS5weQ==) | `94.98% <100%> (+0.02%)` | :arrow_up: |
| [joblib/test/test\_memory.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9tZW1vcnkucHk=) | `98.14% <100%> (ø)` | :arrow_up: |
| [joblib/test/test\_parallel.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL3Rlc3QvdGVzdF9wYXJhbGxlbC5weQ==) | `96.27% <0%> (+0.35%)` | :arrow_up: |
| [joblib/\_parallel\_backends.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL19wYXJhbGxlbF9iYWNrZW5kcy5weQ==) | `94.39% <0%> (+0.43%)` | :arrow_up: |
| [joblib/numpy\_pickle\_utils.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL251bXB5X3BpY2tsZV91dGlscy5weQ==) | `93.29% <0%> (+0.63%)` | :arrow_up: |
| [joblib/hashing.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL2hhc2hpbmcucHk=) | `92.98% <0%> (+0.87%)` | :arrow_up: |
| [joblib/\_memmapping\_reducer.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL19tZW1tYXBwaW5nX3JlZHVjZXIucHk=) | `95% <0%> (+1.11%)` | :arrow_up: |
| [joblib/logger.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL2xvZ2dlci5weQ==) | `86.84% <0%> (+1.31%)` | :arrow_up: |
| [joblib/numpy\_pickle.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL251bXB5X3BpY2tsZS5weQ==) | `98.47% <0%> (+2.03%)` | :arrow_up: |
| [joblib/pool.py](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree#diff-am9ibGliL3Bvb2wucHk=) | `91.37% <0%> (+2.58%)` | :arrow_up: |
| ... and [2 more](https://codecov.io/gh/joblib/joblib/pull/639/diff?src=pr&el=tree-more) | |
------
[Continue to review full report at Codecov](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=footer). Last update [a0e1f69...4f88f5a](https://codecov.io/gh/joblib/joblib/pull/639?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
lesteve: Merging, thanks a lot @aabadie! | diff --git a/joblib/memory.py b/joblib/memory.py
index 0f005dd..18b20f5 100644
--- a/joblib/memory.py
+++ b/joblib/memory.py
@@ -767,7 +767,7 @@ class Memory(Logger):
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent. This option
- replaces cachedir since version 0.11.
+ replaces cachedir since version 0.12.
backend: str or 'local'
Type of store backend for reading/writing cache files.
@@ -776,8 +776,8 @@ class Memory(Logger):
backend.
cachedir: str or None
- cachedir is deprecated since version 0.11 and will be
- removed in 0.13. Please consider using location option instead.
+ cachedir is deprecated since version 0.12 and will be
+ removed in 0.14. Please consider using location option instead.
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent.
@@ -818,17 +818,18 @@ class Memory(Logger):
if cachedir is not None:
if location is None:
warnings.warn("cachedir option is deprecated since version "
- "0.10 and will be removed after version 0.12.\n"
+ "0.12 and will be removed in version 0.14.\n"
"Use option location=<store location> "
"instead.", DeprecationWarning, stacklevel=2)
location = cachedir
else:
warnings.warn("You set both location and cachedir options."
"cachedir is deprecated since version "
- "0.10 and will be removed after version 0.12.\n"
+ "0.12 and will be removed in version 0.14.\n"
"cachedir value will be ignored.",
DeprecationWarning, stacklevel=2)
+ self.location = location
if isinstance(location, _basestring):
location = os.path.join(location, 'joblib')
@@ -837,6 +838,14 @@ class Memory(Logger):
backend_options=dict(compress=compress, mmap_mode=mmap_mode,
**backend_options))
+ @property
+ def cachedir(self):
+ warnings.warn("cachedir option is deprecated since version "
+ "0.12 and will be removed in version 0.14.\n"
+ "Use option location=<store location> "
+ "instead.", DeprecationWarning, stacklevel=2)
+ return self.location
+
def cache(self, func=None, ignore=None, verbose=None, mmap_mode=False):
""" Decorates the given function func to only compute its return
value for input arguments not cached on disk.
| Memory object does not have a cachedir attribute in master
Seems like an oversight from #397. The doc says that cachedir is deprecated but actually it is not set:
https://github.com/joblib/joblib/blob/a0e1f69d2be31e9e6be1f5e346988bc04df7ff75/joblib/memory.py#L778-L783
It'd be a good idea to double-check that other similar things have not been overseen.
Snippet:
```python
from joblib import Memory
mem = Memory('/tmp/test')
print(mem.cachedir)
```
| joblib/joblib | diff --git a/joblib/test/test_memory.py b/joblib/test/test_memory.py
index 7d74506..00f384c 100644
--- a/joblib/test/test_memory.py
+++ b/joblib/test/test_memory.py
@@ -383,6 +383,11 @@ def test_func_dir(tmpdir):
location = os.path.join(g.store_backend.location, func_id)
assert location == path
assert os.path.exists(path)
+ assert memory.location == os.path.dirname(g.store_backend.location)
+ with warns(DeprecationWarning) as w:
+ assert memory.cachedir == os.path.dirname(g.store_backend.location)
+ assert len(w) == 1
+ assert "cachedir option is deprecated since version" in str(w[-1].message)
# Test that the code is stored.
# For the following test to be robust to previous execution, we clear
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.11 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-timeout",
"codecov"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"continuous_integration/appveyor/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
coverage==6.2
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/joblib/joblib.git@d3e478f2822dde4181927b9efb6064cf00b92996#egg=joblib
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-timeout==2.1.0
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: joblib
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- codecov==2.1.13
- coverage==6.2
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-timeout==2.1.0
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/joblib
| [
"joblib/test/test_memory.py::test_func_dir"
] | [] | [
"joblib/test/test_memory.py::test_memory_integration",
"joblib/test/test_memory.py::test_no_memory",
"joblib/test/test_memory.py::test_memory_kwarg",
"joblib/test/test_memory.py::test_memory_lambda",
"joblib/test/test_memory.py::test_memory_name_collision",
"joblib/test/test_memory.py::test_memory_warning_lambda_collisions",
"joblib/test/test_memory.py::test_memory_warning_collision_detection",
"joblib/test/test_memory.py::test_memory_partial",
"joblib/test/test_memory.py::test_memory_eval",
"joblib/test/test_memory.py::test_argument_change",
"joblib/test/test_memory.py::test_memory_numpy[None]",
"joblib/test/test_memory.py::test_memory_numpy[r]",
"joblib/test/test_memory.py::test_memory_numpy_check_mmap_mode",
"joblib/test/test_memory.py::test_memory_exception",
"joblib/test/test_memory.py::test_memory_ignore",
"joblib/test/test_memory.py::test_partial_decoration[ignore0-100-r]",
"joblib/test/test_memory.py::test_partial_decoration[ignore1-10-None]",
"joblib/test/test_memory.py::test_persistence",
"joblib/test/test_memory.py::test_call_and_shelve",
"joblib/test/test_memory.py::test_memorized_pickling",
"joblib/test/test_memory.py::test_memorized_repr",
"joblib/test/test_memory.py::test_memory_file_modification",
"joblib/test/test_memory.py::test_memory_in_memory_function_code_change",
"joblib/test/test_memory.py::test_clear_memory_with_none_location",
"joblib/test/test_memory.py::test_memory_func_with_kwonly_args",
"joblib/test/test_memory.py::test_memory_func_with_signature",
"joblib/test/test_memory.py::test__get_items",
"joblib/test/test_memory.py::test__get_items_to_delete",
"joblib/test/test_memory.py::test_memory_reduce_size",
"joblib/test/test_memory.py::test_memory_clear",
"joblib/test/test_memory.py::test_cached_function_race_condition_when_persisting_output",
"joblib/test/test_memory.py::test_cached_function_race_condition_when_persisting_output_2",
"joblib/test/test_memory.py::test_concurrency_safe_write[multiprocessing]",
"joblib/test/test_memory.py::test_concurrency_safe_write[loky]",
"joblib/test/test_memory.py::test_concurrency_safe_write[threading]",
"joblib/test/test_memory.py::test_memory_recomputes_after_an_error_why_loading_results",
"joblib/test/test_memory.py::test_cachedir_deprecation_warning",
"joblib/test/test_memory.py::test_register_invalid_store_backends_key[None]",
"joblib/test/test_memory.py::test_register_invalid_store_backends_key[invalid_prefix1]",
"joblib/test/test_memory.py::test_register_invalid_store_backends_key[invalid_prefix2]",
"joblib/test/test_memory.py::test_register_invalid_store_backends_object",
"joblib/test/test_memory.py::test_memory_default_store_backend",
"joblib/test/test_memory.py::test_instanciate_incomplete_store_backend",
"joblib/test/test_memory.py::test_dummy_store_backend"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,224 | 688 | [
"joblib/memory.py"
] |
poliastro__poliastro-330 | 60d463f8043de496eb377e2bfb798560910fb679 | 2018-03-01 00:27:02 | 050339c63c6eab6a1a58adc286f423f5265e0cac | diff --git a/src/poliastro/plotting.py b/src/poliastro/plotting.py
index 48bc1e9f..ad3af768 100644
--- a/src/poliastro/plotting.py
+++ b/src/poliastro/plotting.py
@@ -131,6 +131,12 @@ class OrbitPlotter(object):
self.ax.add_patch(mpl.patches.Circle((0, 0), radius, lw=0, color=color))
+ def _project(self, rr):
+ rr_proj = rr - rr.dot(self._frame[2])[:, None] * self._frame[2]
+ x = rr_proj.dot(self._frame[0])
+ y = rr_proj.dot(self._frame[1])
+ return x, y
+
def plot(self, orbit, label=None, color=None):
"""Plots state and osculating orbit in their plane.
@@ -156,15 +162,15 @@ class OrbitPlotter(object):
# Project on OrbitPlotter frame
# x_vec, y_vec, z_vec = self._frame
- rr_proj = rr - rr.dot(self._frame[2])[:, None] * self._frame[2]
- x = rr_proj.dot(self._frame[0])
- y = rr_proj.dot(self._frame[1])
+ x, y = self._project(rr)
+ x0, y0 = self._project(orbit.r[None])
# Plot current position
- l, = self.ax.plot(x[0].to(u.km).value, y[0].to(u.km).value,
+ l, = self.ax.plot(x0.to(u.km).value, y0.to(u.km).value,
'o', mew=0, color=color)
lines.append(l)
+ # Plot trajectory
l, = self.ax.plot(x.to(u.km).value, y.to(u.km).value,
'--', color=l.get_color())
lines.append(l)
diff --git a/src/poliastro/twobody/orbit.py b/src/poliastro/twobody/orbit.py
index d2ab7542..82c241c0 100644
--- a/src/poliastro/twobody/orbit.py
+++ b/src/poliastro/twobody/orbit.py
@@ -298,20 +298,25 @@ class Orbit(object):
# first sample eccentric anomaly, then transform into true anomaly
# why sampling eccentric anomaly uniformly to minimize error in the apocenter, see
# http://www.dtic.mil/dtic/tr/fulltext/u2/a605040.pdf
+ # Start from pericenter
E_values = np.linspace(0, 2 * np.pi, values) * u.rad
nu_values = E_to_nu(E_values, self.ecc)
else:
# Select a sensible limiting value for non-closed orbits
- # This corresponds to r = 3p
- nu_limit = np.arccos(-(1 - 1 / 3.) / self.ecc)
+ # This corresponds to max(r = 3p, r = self.r)
+ # We have to wrap nu in [-180, 180) to compare it with the output of
+ # the arc cosine, which is in the range [0, 180)
+ # Start from -nu_limit
+ wrapped_nu = self.nu if self.nu < 180 * u.deg else self.nu - 360 * u.deg
+ nu_limit = max(np.arccos(-(1 - 1 / 3.) / self.ecc), wrapped_nu)
nu_values = np.linspace(-nu_limit, nu_limit, values)
- nu_values = np.insert(nu_values, 0, self.ecc)
return self.sample(nu_values, method)
elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
values = self._generate_time_values(values)
- return (values, self._sample(values, method))
+
+ return values, self._sample(values, method)
def _sample(self, time_values, method=mean_motion):
values = np.zeros((len(time_values), 3)) * self.r.unit
@@ -322,7 +327,8 @@ class Orbit(object):
return CartesianRepresentation(values, xyz_axis=1)
def _generate_time_values(self, nu_vals):
- M_vals = nu_to_M(nu_vals, self.ecc)
+ # Subtract current anomaly to start from the desired point
+ M_vals = nu_to_M(nu_vals, self.ecc) - nu_to_M(self.nu, self.ecc)
time_values = self.epoch + (M_vals / self.n).decompose()
return time_values
| "New Horizons launch" example won't run on master
<!--- Provide a general summary of the issue in the Title above -->
🐞 **Problem**
<!--- Provide a detailed description of the change or addition you are proposing -->
<!--- If it is a feature or a bug, what problem is it solving-->
[Exploring the new Horizons launch](https://github.com/poliastro/poliastro/blob/d32f3ab802f782a03582e6c183a28c12c6abb2d7/docs/source/examples/Exploring%20the%20New%20Horizons%20launch.ipynb) example fails on `In [5]` when poliastro is installed from master (d32f3ab802f782a03582e6c183a28c12c6abb2d7).
<details>
```python
/usr/local/lib/python3.6/site-packages/astropy/units/quantity.py:639: RuntimeWarning:
invalid value encountered in log
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1369 try:
-> 1370 other = TimeDelta(other)
1371 except Exception:
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __init__(self, val, val2, format, scale, copy)
1539
-> 1540 self._init_from_vals(val, val2, format, scale, copy)
1541
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _init_from_vals(self, val, val2, format, scale, copy, precision, in_subfmt, out_subfmt)
329 self._time = self._get_time_fmt(val, val2, format, scale,
--> 330 precision, in_subfmt, out_subfmt)
331 self._format = self._time.name
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in _get_time_fmt(self, val, val2, format, scale, precision, in_subfmt, out_subfmt)
373 else:
--> 374 raise ValueError('Input values did not match {0}'.format(err_msg))
375
ValueError: Input values did not match the format class jd
During handling of the above exception, another exception occurred:
OperandTypeError Traceback (most recent call last)
<ipython-input-5-f1d654cf6abc> in <module>()
2
3 op.plot(parking)
----> 4 op.plot(exit)
5
6 plt.xlim(-8000, 8000)
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/plotting.py in plot(self, orbit, label, color)
152 lines = []
153
--> 154 _, positions = orbit.sample(self.num_points)
155 rr = positions.get_xyz().transpose()
156
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
308 nu_values = np.insert(nu_values, 0, self.ecc)
309
--> 310 return self.sample(nu_values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in sample(self, values, function)
311
312 elif hasattr(values, "unit") and values.unit in ('rad', 'deg'):
--> 313 values = self._generate_time_values(values)
314 return (values, self._sample(values, function))
315
/usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/twobody/orbit.py in _generate_time_values(self, nu_vals)
324 def _generate_time_values(self, nu_vals):
325 M_vals = nu_to_M(nu_vals, self.ecc)
--> 326 time_values = self.epoch + (M_vals / self.n).decompose()
327 return time_values
328
/usr/local/lib/python3.6/site-packages/astropy/time/core.py in __add__(self, other)
1370 other = TimeDelta(other)
1371 except Exception:
-> 1372 raise OperandTypeError(self, other, '+')
1373
1374 # Tdelta + something is dealt with in TimeDelta, so we have
OperandTypeError: Unsupported operand type(s) for +: 'Time' and 'Quantity'
```
</details><br>
🖥 **Please paste the output of following commands**
(Installed poliastro without conda)
```
$ python3 -V
Python 3.6.4
$ uname -a
Linux linux 4.13.0-32-generic #35~16.04.1-Ubuntu SMP Thu Jan 25 10:13:43 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
```
<details>
<summary>Running tests</summary>
```
$ python3 -c "import poliastro.testing; poliastro.testing.test()"
======================================= test session starts ========================================
platform linux -- Python 3.6.4, pytest-3.4.1, py-1.5.2, pluggy-0.6.0
rootdir: /usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro, inifile:
plugins: rerunfailures-4.0, remotedata-0.2.0, openfiles-0.2.0, mock-1.6.3, doctestplus-0.1.2, cov-2.5.1, arraydiff-0.2, hypothesis-3.45.0
collected 140 items
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_bodies.py . [ 0%]
..... [ 4%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_coordinates.py . [ 5%]
.. [ 6%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_hyper.py . [ 7%]
.......... [ 14%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_iod.py . [ 15%]
........ [ 20%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_jit.py . [ 21%]
.. [ 22%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_maneuver.py . [ 23%]
..... [ 27%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_patched_conics.py . [ 27%]
. [ 28%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_plotting.py . [ 29%]
....... [ 34%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_plotting3d.py . [ 35%]
..... [ 38%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_stumpff.py . [ 39%]
.. [ 40%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_twobody.py . [ 41%]
. [ 42%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/test_util.py . [ 42%]
...... [ 47%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_neos/test_dastcom5.py . [ 47%]
........ [ 53%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_neos/test_neos_neows.py . [ 54%]
..... [ 57%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_angles.py . [ 58%]
...... [ 62%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_decorators.py . [ 63%]
. [ 64%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_orbit.py . [ 65%]
.................. [ 77%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_propagation.py . [ 78%]
........ [ 84%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_sample.py . [ 85%]
........... [ 92%]
../../usr/local/lib/python3.6/site-packages/poliastro-0.9.dev0-py3.6.egg/poliastro/tests/tests_twobody/test_states.py . [ 93%]
......... [100%]
=================================== 140 passed in 23.95 seconds ====================================
```
</details><br>
Looks like 27318862c266caf51a96857864eaf3045f613dae (was later on modified by 92b6c9dbcfa2d5fcf707094642821725465b131e) caused this regression.
🎯 **Goal**
<!--- Why is this change important to you? How would you use it? -->
<!--- How can it benefit other users? -->
💡 **Possible solutions**
<!--- Not obligatory, but suggest an idea for implementing addition or change -->
📋 **Steps to solve the problem**
* Comment below about what you've started working on.
* Add, commit, push your changes
* Submit a pull request and add this in comments - `Addresses #<put issue number here>`
* Ask for a review in comments section of pull request
* Celebrate your contribution to this project 🎉
| poliastro/poliastro | diff --git a/src/poliastro/tests/tests_twobody/test_orbit.py b/src/poliastro/tests/tests_twobody/test_orbit.py
index 14b0835d..1649570c 100644
--- a/src/poliastro/tests/tests_twobody/test_orbit.py
+++ b/src/poliastro/tests/tests_twobody/test_orbit.py
@@ -181,7 +181,7 @@ def test_sample_with_time_value():
ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)
expected_r = [ss.r]
- _, positions = ss.sample(values=[360] * u.deg)
+ _, positions = ss.sample(values=ss.nu + [360] * u.deg)
r = positions.get_xyz().transpose()
assert_quantity_allclose(r, expected_r, rtol=1.e-7)
@@ -195,18 +195,21 @@ def test_sample_with_nu_value():
ss = Orbit.from_classical(_body, _d, _, _a, _a, _a, _a)
expected_r = [ss.r]
- _, positions = ss.sample(values=[360] * u.deg)
+ _, positions = ss.sample(values=ss.nu + [360] * u.deg)
r = positions.get_xyz().transpose()
assert_quantity_allclose(r, expected_r, rtol=1.e-7)
-def test_nu_value_check():
- _d = [1.197659243752796E+09, -4.443716685978071E+09, -1.747610548576734E+09] * u.km
- _v = [5.540549267188614E+00, -1.251544669134140E+01, -4.848892572767733E+00] * u.km / u.s
- ss = Orbit.from_vectors(Sun, _d, _v, Time('2015-07-14 07:59', scale='tdb'))
+def test_hyperbolic_nu_value_check():
+ # A custom hyperbolic orbit
+ r = [1.197659243752796E+09, -4.443716685978071E+09, -1.747610548576734E+09] * u.km
+ v = [5.540549267188614E+00, -1.251544669134140E+01, -4.848892572767733E+00] * u.km / u.s
+
+ ss = Orbit.from_vectors(Sun, r, v, Time('2015-07-14 07:59', scale='tdb'))
+
values, positions = ss.sample(100)
assert isinstance(positions, CartesianRepresentation)
assert isinstance(values, Time)
- assert len(positions) == len(values) == 101
+ assert len(positions) == len(values) == 100
diff --git a/src/poliastro/tests/tests_twobody/test_sample.py b/src/poliastro/tests/tests_twobody/test_sample.py
index 051f86c9..ae1b978c 100644
--- a/src/poliastro/tests/tests_twobody/test_sample.py
+++ b/src/poliastro/tests/tests_twobody/test_sample.py
@@ -9,6 +9,8 @@ from poliastro.twobody import Orbit
from poliastro.twobody.propagation import kepler, mean_motion, cowell
import numpy as np
+from poliastro.util import norm
+
def test_sample_angle_zero_returns_same():
# Data from Vallado, example 2.4
@@ -17,7 +19,7 @@ def test_sample_angle_zero_returns_same():
ss0 = Orbit.from_vectors(Earth, r0, v0)
nu_values = [0] * u.deg
- _, rr = ss0.sample(nu_values)
+ _, rr = ss0.sample(ss0.nu + nu_values)
assert_quantity_allclose(rr[0].get_xyz(), ss0.r)
@@ -71,7 +73,8 @@ def test_sample_nu_values():
_, rr = ss0.sample(nu_values)
assert len(rr) == len(nu_values)
- assert_quantity_allclose(rr[-1].get_xyz(), expected_ss.r)
+ assert_quantity_allclose(norm(rr[0].get_xyz()), expected_ss.r_p)
+ assert_quantity_allclose(norm(rr[-1].get_xyz()), expected_ss.r_a)
@pytest.mark.parametrize("num_points", [3, 5, 7, 9, 11, 101])
@@ -81,9 +84,10 @@ def test_sample_num_points(num_points):
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
- expected_ss = ss0.propagate(ss0.period / 2)
+ # TODO: Test against the perigee and apogee
+ # expected_ss = ss0.propagate(ss0.period / 2)
_, rr = ss0.sample(num_points)
assert len(rr) == num_points
- assert_quantity_allclose(rr[num_points // 2].get_xyz(), expected_ss.r)
+ # assert_quantity_allclose(rr[num_points // 2].get_xyz(), expected_ss.r)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
astropy==3.2.3
async-generator==1.10
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
backcall==0.2.0
beautifulsoup4==4.12.3
bleach==4.1.0
CALLHORIZONS==1.1.1
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
coverage==6.2
cycler==0.11.0
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.16
entrypoints==0.4
idna==3.10
imagesize==1.4.1
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
jplephem==2.22
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
jupyterlab_widgets==1.1.11
kiwisolver==1.3.1
llvmlite==0.36.0
MarkupSafe==2.0.1
matplotlib==3.3.4
mistune==0.8.4
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.3.5
nest-asyncio==1.6.0
notebook==6.4.10
numba==0.53.1
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
Pillow==8.4.0
plotly==5.18.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
-e git+https://github.com/poliastro/poliastro.git@60d463f8043de496eb377e2bfb798560910fb679#egg=poliastro
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.10.0
pycparser==2.21
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pyrsistent==0.18.0
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
scipy==1.5.4
Send2Trash==1.8.3
six==1.17.0
snowballstemmer==2.2.0
soupsieve==2.3.2.post1
Sphinx==1.5.6
sphinx-rtd-theme==0.5.2
tenacity==8.2.2
terminado==0.12.1
testpath==0.6.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
widgetsnbextension==3.6.10
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: poliastro
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- astropy==3.2.3
- async-generator==1.10
- babel==2.11.0
- backcall==0.2.0
- beautifulsoup4==4.12.3
- bleach==4.1.0
- callhorizons==1.1.1
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- coverage==6.2
- cycler==0.11.0
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.16
- entrypoints==0.4
- idna==3.10
- imagesize==1.4.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- jplephem==2.22
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- jupyterlab-widgets==1.1.11
- kiwisolver==1.3.1
- llvmlite==0.36.0
- markupsafe==2.0.1
- matplotlib==3.3.4
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.3.5
- nest-asyncio==1.6.0
- notebook==6.4.10
- numba==0.53.1
- numpy==1.19.5
- pandas==1.1.5
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pillow==8.4.0
- plotly==5.18.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- pycodestyle==2.10.0
- pycparser==2.21
- pygments==2.14.0
- pyrsistent==0.18.0
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- scipy==1.5.4
- send2trash==1.8.3
- six==1.17.0
- snowballstemmer==2.2.0
- soupsieve==2.3.2.post1
- sphinx==1.5.6
- sphinx-rtd-theme==0.5.2
- tenacity==8.2.2
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- widgetsnbextension==3.6.10
prefix: /opt/conda/envs/poliastro
| [
"src/poliastro/tests/tests_twobody/test_orbit.py::test_sample_with_time_value",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_sample_with_nu_value",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_hyperbolic_nu_value_check",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_angle_zero_returns_same"
] | [] | [
"src/poliastro/tests/tests_twobody/test_orbit.py::test_default_time_for_new_state",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_state_raises_unitserror_if_elements_units_are_wrong",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_state_raises_unitserror_if_rv_units_are_wrong",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_elements_fail_early",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_bad_inclination_raises_exception",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_bad_hyperbolic_raises_exception",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_apply_maneuver_changes_epoch",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_orbit_from_ephem_with_no_epoch_is_today",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_from_ephem_raises_warning_if_time_is_not_tdb_with_proper_time",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_circular_has_proper_semimajor_axis",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_geosync_has_proper_period",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_has_proper_eccentricity",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_parabolic_has_zero_energy",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_pqw_for_circular_equatorial_orbit",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_orbit_representation",
"src/poliastro/tests/tests_twobody/test_orbit.py::test_sample_numpoints",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[kepler-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[kepler-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[mean_motion-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[mean_motion-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[cowell-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_small_deltas[cowell-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[kepler-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[kepler-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[mean_motion-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[mean_motion-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[cowell-time_of_flight0]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_one_point_equals_propagation_big_deltas[cowell-time_of_flight1]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_nu_values",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[3]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[5]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[7]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[9]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[11]",
"src/poliastro/tests/tests_twobody/test_sample.py::test_sample_num_points[101]"
] | [] | MIT License | 2,229 | 1,097 | [
"src/poliastro/plotting.py",
"src/poliastro/twobody/orbit.py"
] |
|
nipy__nipype-2479 | e6792158568a51f0e6cdef77c6ca12ab6266a7dd | 2018-03-01 22:23:30 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py
index 2b6bb6ed3..301a35844 100644
--- a/nipype/pipeline/engine/utils.py
+++ b/nipype/pipeline/engine/utils.py
@@ -1050,7 +1050,17 @@ def generate_expanded_graph(graph_in):
expansions = defaultdict(list)
for node in graph_in.nodes():
for src_id in list(old_edge_dict.keys()):
- if node.itername.startswith(src_id):
+ # Drop the original JoinNodes; only concerned with
+ # generated Nodes
+ if hasattr(node, 'joinfield'):
+ continue
+ # Patterns:
+ # - src_id : Non-iterable node
+ # - src_id.[a-z]\d+ : IdentityInterface w/ iterables
+ # - src_id.[a-z]I.[a-z]\d+ : Non-IdentityInterface w/ iterables
+ # - src_idJ\d+ : JoinNode(IdentityInterface)
+ if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$',
+ node.itername):
expansions[src_id].append(node)
for in_id, in_nodes in list(expansions.items()):
logger.debug("The join node %s input %s was expanded"
| Issue with node name that starts with another node's name
I think the [line ~801 in util.py](https://github.com/nipy/nipype/edit/master/nipype/pipeline/engine/utils.py#L801) should be something like this:
for node in graph_in.nodes():
for src_id, edge_data in list(old_edge_dict.items()):
if node.itername.startswith(src_id + '.'): # <-- add '.' to src_id
expansions[src_id].append(node)
For example, if the node "input" feeds into "input_files", the "input_files" can be included if you just test for node.itername.startswith(src_id). This change would prevent "input_files" from being included.
Edit: removed last part of my comment. | nipy/nipype | diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py
index 436d29d9e..54ff15048 100644
--- a/nipype/pipeline/engine/tests/test_join.py
+++ b/nipype/pipeline/engine/tests/test_join.py
@@ -7,11 +7,9 @@ from __future__ import (print_function, division, unicode_literals,
absolute_import)
from builtins import open
-import os
-
from ... import engine as pe
from ....interfaces import base as nib
-from ....interfaces.utility import IdentityInterface
+from ....interfaces.utility import IdentityInterface, Function, Merge
from ....interfaces.base import traits, File
@@ -612,3 +610,20 @@ def test_nested_workflow_join(tmpdir):
# there should be six nodes in total
assert len(result.nodes()) == 6, \
"The number of expanded nodes is incorrect."
+
+
+def test_name_prefix_join(tmpdir):
+ tmpdir.chdir()
+
+ def sq(x):
+ return x ** 2
+
+ wf = pe.Workflow('wf', base_dir=tmpdir.strpath)
+ square = pe.Node(Function(function=sq), name='square')
+ square.iterables = [('x', [1, 2])]
+ square_join = pe.JoinNode(Merge(1, ravel_inputs=True),
+ name='square_join',
+ joinsource='square',
+ joinfield=['in1'])
+ wf.connect(square, 'out', square_join, "in1")
+ wf.run()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
click==8.0.4
configparser==5.2.0
decorator==4.4.2
funcsigs==1.0.2
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
lxml==5.3.1
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@e6792158568a51f0e6cdef77c6ca12ab6266a7dd#egg=nipype
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
rdflib==5.0.0
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- configparser==5.2.0
- decorator==4.4.2
- funcsigs==1.0.2
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- lxml==5.3.1
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- rdflib==5.0.0
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/pipeline/engine/tests/test_join.py::test_name_prefix_join"
] | [] | [
"nipype/pipeline/engine/tests/test_join.py::test_join_expansion",
"nipype/pipeline/engine/tests/test_join.py::test_node_joinsource",
"nipype/pipeline/engine/tests/test_join.py::test_set_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_unique_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_multiple_join_nodes",
"nipype/pipeline/engine/tests/test_join.py::test_identity_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_multifield_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_synchronize_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_itersource_join_source_node",
"nipype/pipeline/engine/tests/test_join.py::test_itersource_two_join_nodes",
"nipype/pipeline/engine/tests/test_join.py::test_set_join_node_file_input",
"nipype/pipeline/engine/tests/test_join.py::test_nested_workflow_join"
] | [] | Apache License 2.0 | 2,236 | 322 | [
"nipype/pipeline/engine/utils.py"
] |
|
pydicom__pydicom-588 | da6f7917ca2a32a6886e023a71b0b095f5bc06c8 | 2018-03-03 19:44:44 | fcc63f0b96fb370b0eb60b2c765b469ce62e597c | diff --git a/pydicom/dataset.py b/pydicom/dataset.py
index 1052b8587..8d12864bb 100644
--- a/pydicom/dataset.py
+++ b/pydicom/dataset.py
@@ -28,8 +28,8 @@ from pydicom.datadict import (tag_for_keyword, keyword_for_tag,
repeater_has_keyword)
from pydicom.tag import Tag, BaseTag, tag_in_exception
from pydicom.dataelem import DataElement, DataElement_from_raw, RawDataElement
-from pydicom.uid import (UncompressedPixelTransferSyntaxes,
- ExplicitVRLittleEndian)
+from pydicom.uid import (UncompressedPixelTransferSyntaxes,
+ ExplicitVRLittleEndian)
import pydicom # for dcmwrite
import pydicom.charset
from pydicom.config import logger
@@ -400,14 +400,13 @@ class Dataset(dict):
return True
if isinstance(other, self.__class__):
- # Compare Elements using values() and class variables using
- # __dict__
+ # Compare Elements using values()
# Convert values() to a list for compatibility between
# python 2 and 3
# Sort values() by element tag
self_elem = sorted(list(self.values()), key=lambda x: x.tag)
other_elem = sorted(list(other.values()), key=lambda x: x.tag)
- return self_elem == other_elem and self.__dict__ == other.__dict__
+ return self_elem == other_elem
return NotImplemented
@@ -697,9 +696,9 @@ class Dataset(dict):
Returns
-------
- None
+ None
Converted pixel data is stored internally in the dataset.
-
+
If a compressed image format, the image is decompressed,
and any related data elements are changed accordingly.
"""
@@ -720,7 +719,9 @@ class Dataset(dict):
pixel_array = x.get_pixeldata(self)
self._pixel_array = self._reshape_pixel_array(pixel_array)
if x.needs_to_convert_to_RGB(self):
- self._pixel_array = self._convert_YBR_to_RGB(self._pixel_array)
+ self._pixel_array = self._convert_YBR_to_RGB(
+ self._pixel_array
+ )
successfully_read_pixel_data = True
break
except Exception as e:
@@ -743,31 +744,31 @@ class Dataset(dict):
raise NotImplementedError(msg)
# is this guaranteed to work if memory is re-used??
self._pixel_id = id(self.PixelData)
-
+
def decompress(self):
"""Decompresses pixel data and modifies the Dataset in-place
- If not a compressed tranfer syntax, then pixel data is converted
- to a numpy array internally, but not returned.
-
- If compressed pixel data, then is decompressed using an image handler,
- and internal state is updated appropriately:
- - TransferSyntax is updated to non-compressed form
- - is_undefined_length for pixel data is set False
+ If not a compressed tranfer syntax, then pixel data is converted
+ to a numpy array internally, but not returned.
+
+ If compressed pixel data, then is decompressed using an image handler,
+ and internal state is updated appropriately:
+ - TransferSyntax is updated to non-compressed form
+ - is_undefined_length for pixel data is set False
Returns
-------
None
- Raises
+ Raises
------
NotImplementedError
If the pixel data was originally compressed but file is not
- ExplicitVR LittleEndian as required by Dicom standard
- """
+ ExplicitVR LittleEndian as required by Dicom standard
+ """
self.convert_pixel_data()
self.is_decompressed = True
- # May have been undefined length pixel data, but won't be now
+ # May have been undefined length pixel data, but won't be now
if 'PixelData' in self:
self[0x7fe00010].is_undefined_length = False
@@ -780,14 +781,13 @@ class Dataset(dict):
# Check that current file as read does match expected
if not self.is_little_endian or self.is_implicit_VR:
msg = ("Current dataset does not match expected ExplicitVR "
- "LittleEndian transfer syntax from a compressed "
+ "LittleEndian transfer syntax from a compressed "
"transfer syntax")
raise NotImplementedError(msg)
-
+
# All is as expected, updated the Transfer Syntax
self.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
-
@property
def pixel_array(self):
"""Return the Pixel Data as a NumPy array.
@@ -1233,3 +1233,29 @@ class FileDataset(Dataset):
if self.filename and os.path.exists(self.filename):
statinfo = os.stat(self.filename)
self.timestamp = statinfo.st_mtime
+
+ def __eq__(self, other):
+ """Compare `self` and `other` for equality.
+
+ Returns
+ -------
+ bool
+ The result if `self` and `other` are the same class
+ NotImplemented
+ If `other` is not the same class as `self` then returning
+ NotImplemented delegates the result to superclass.__eq__(subclass)
+ """
+ # When comparing against self this will be faster
+ if other is self:
+ return True
+
+ if isinstance(other, self.__class__):
+ # Compare Elements using values() and class members using __dict__
+ # Convert values() to a list for compatibility between
+ # python 2 and 3
+ # Sort values() by element tag
+ self_elem = sorted(list(self.values()), key=lambda x: x.tag)
+ other_elem = sorted(list(other.values()), key=lambda x: x.tag)
+ return self_elem == other_elem and self.__dict__ == other.__dict__
+
+ return NotImplemented
diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index ceb809915..01d9a4911 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -226,7 +226,10 @@ def write_PN(fp, data_element, padding=b' ', encoding=None):
val = data_element.value
if isinstance(val[0], compat.text_type) or not in_py2:
- val = [elem.encode(encoding) for elem in val]
+ try:
+ val = [elem.encode(encoding) for elem in val]
+ except TypeError:
+ val = [elem.encode(encoding[0]) for elem in val]
val = b'\\'.join(val)
| No encoding with write_PN raises TypeError
#### Description
`filewriter.write_PN()` with `encoding=None` (default) raises `TypeError`
#### Steps/Code to Reproduce
```python
>>> from pydicom.filebase import DicomBytesIO
>>> from pydicom.dataelem import DataElement
>>> from pydicom.filewriter import write_PN
>>> fp = DicomBytesIO()
>>> fp.is_little_endian = True
>>> elem = DataElement(0x00100010, 'PN', u'\u03b8')
>>> write_PN(fp, elem)
```
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "../pydicom/pydicom/filewriter.py", line 228, in write_PN
val = [elem.encode(encoding) for elem in val]
TypeError: encode() argument 1 must be string, not list
```
Occurs because when `encoding=None`, `encoding = [default] * 3` is used.
I'm not sure what the impact is in practice since `write_PN` only seems to get called internally with encoding not set to None.
#### Versions
Python 2.7
| pydicom/pydicom | diff --git a/pydicom/tests/test_dataset.py b/pydicom/tests/test_dataset.py
index fee7082bc..822b04e79 100644
--- a/pydicom/tests/test_dataset.py
+++ b/pydicom/tests/test_dataset.py
@@ -403,32 +403,35 @@ class DatasetTests(unittest.TestCase):
def testEqualityNoSequence(self):
"""Dataset: equality returns correct value with simple dataset"""
+ # Test empty dataset
+ assert Dataset() == Dataset()
+
d = Dataset()
d.SOPInstanceUID = '1.2.3.4'
d.PatientName = 'Test'
- self.assertTrue(d == d)
+ assert d == d
e = Dataset()
e.PatientName = 'Test'
e.SOPInstanceUID = '1.2.3.4'
- self.assertTrue(d == e)
+ assert d == e
e.SOPInstanceUID = '1.2.3.5'
- self.assertFalse(d == e)
+ assert not d == e
# Check VR
del e.SOPInstanceUID
e.add(DataElement(0x00080018, 'PN', '1.2.3.4'))
- self.assertFalse(d == e)
+ assert not d == e
# Check Tag
del e.SOPInstanceUID
e.StudyInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
# Check missing Element in self
e.SOPInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
# Check missing Element in other
d = Dataset()
@@ -437,7 +440,7 @@ class DatasetTests(unittest.TestCase):
e = Dataset()
e.SOPInstanceUID = '1.2.3.4'
- self.assertFalse(d == e)
+ assert not d == e
def testEqualityPrivate(self):
"""Dataset: equality returns correct value"""
@@ -500,16 +503,14 @@ class DatasetTests(unittest.TestCase):
def testEqualityUnknown(self):
"""Dataset: equality returns correct value with extra members """
+ # Non-element class members are ignored in equality testing
d = Dataset()
d.SOPEustaceUID = '1.2.3.4'
- self.assertTrue(d == d)
+ assert d == d
e = Dataset()
- e.SOPEustaceUID = '1.2.3.4'
- self.assertTrue(d == e)
-
e.SOPEustaceUID = '1.2.3.5'
- self.assertFalse(d == e)
+ assert d == e
def testEqualityInheritance(self):
"""Dataset: equality returns correct value for subclass """
@@ -529,6 +530,19 @@ class DatasetTests(unittest.TestCase):
self.assertFalse(d == e)
self.assertFalse(e == d)
+ def test_equality_elements(self):
+ """Test that Dataset equality only checks DataElements."""
+ d = Dataset()
+ d.SOPInstanceUID = '1.2.3.4'
+ d.PatientName = 'Test'
+ d.foo = 'foo'
+ assert d == d
+
+ e = Dataset()
+ e.PatientName = 'Test'
+ e.SOPInstanceUID = '1.2.3.4'
+ assert d == e
+
def test_inequality(self):
"""Test inequality operator"""
d = Dataset()
diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index 9ed2347ea..6e829300b 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -1826,7 +1826,6 @@ class TestWriteNumbers(object):
class TestWritePN(object):
"""Test filewriter.write_PN"""
- @pytest.mark.skip("Raises exception due to issue #489")
def test_no_encoding_unicode(self):
"""If PN element has no encoding info, default is used"""
fp = DicomBytesIO()
@@ -1979,12 +1978,11 @@ class TestWriteNumbers(object):
class TestWritePN(object):
"""Test filewriter.write_PN"""
- @pytest.mark.skip("Raises exception due to issue #489")
def test_no_encoding_unicode(self):
"""If PN element as no encoding info, default is used"""
fp = DicomBytesIO()
fp.is_little_endian = True
- elem = DataElement(0x00100010, 'PN', u'\u03b8')
+ elem = DataElement(0x00100010, 'PN', u'\u00e8')
write_PN(fp, elem)
def test_no_encoding(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 2
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/pydicom/pydicom.git@da6f7917ca2a32a6886e023a71b0b095f5bc06c8#egg=pydicom
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityUnknown",
"pydicom/tests/test_dataset.py::DatasetTests::test_equality_elements"
] | [
"pydicom/tests/test_dataset.py::DatasetTests::test_get_item"
] | [
"pydicom/tests/test_dataset.py::DatasetTests::testAttributeErrorInProperty",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttr",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomAttrWeDontHave",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteDicomCommandGroupLength",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemLong",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteItemTuple",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteNonExistingItem",
"pydicom/tests/test_dataset.py::DatasetTests::testDeleteOtherAttr",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityInheritance",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityNoSequence",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityNotDataset",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualityPrivate",
"pydicom/tests/test_dataset.py::DatasetTests::testEqualitySequence",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault1",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault2",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault3",
"pydicom/tests/test_dataset.py::DatasetTests::testGetDefault4",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists1",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists2",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists3",
"pydicom/tests/test_dataset.py::DatasetTests::testGetExists4",
"pydicom/tests/test_dataset.py::DatasetTests::testGetFromRaw",
"pydicom/tests/test_dataset.py::DatasetTests::testHash",
"pydicom/tests/test_dataset.py::DatasetTests::testMembership",
"pydicom/tests/test_dataset.py::DatasetTests::testSetExistingDataElementByName",
"pydicom/tests/test_dataset.py::DatasetTests::testSetNewDataElementByName",
"pydicom/tests/test_dataset.py::DatasetTests::testSetNonDicom",
"pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionPrint",
"pydicom/tests/test_dataset.py::DatasetTests::testTagExceptionWalk",
"pydicom/tests/test_dataset.py::DatasetTests::testUpdate",
"pydicom/tests/test_dataset.py::DatasetTests::test_NamedMemberUpdated",
"pydicom/tests/test_dataset.py::DatasetTests::test__setitem__",
"pydicom/tests/test_dataset.py::DatasetTests::test_add_repeater_elem_by_keyword",
"pydicom/tests/test_dataset.py::DatasetTests::test_attribute_error_in_property_correct_debug",
"pydicom/tests/test_dataset.py::DatasetTests::test_contains",
"pydicom/tests/test_dataset.py::DatasetTests::test_data_element",
"pydicom/tests/test_dataset.py::DatasetTests::test_delitem_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_dir",
"pydicom/tests/test_dataset.py::DatasetTests::test_dir_filter",
"pydicom/tests/test_dataset.py::DatasetTests::test_dir_subclass",
"pydicom/tests/test_dataset.py::DatasetTests::test_empty_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_exit_exception",
"pydicom/tests/test_dataset.py::DatasetTests::test_formatted_lines",
"pydicom/tests/test_dataset.py::DatasetTests::test_get_pixel_array_already_have",
"pydicom/tests/test_dataset.py::DatasetTests::test_get_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice",
"pydicom/tests/test_dataset.py::DatasetTests::test_getitem_slice_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_group_dataset",
"pydicom/tests/test_dataset.py::DatasetTests::test_inequality",
"pydicom/tests/test_dataset.py::DatasetTests::test_is_uncompressed_transfer_syntax",
"pydicom/tests/test_dataset.py::DatasetTests::test_iterall",
"pydicom/tests/test_dataset.py::DatasetTests::test_matching_tags",
"pydicom/tests/test_dataset.py::DatasetTests::test_property",
"pydicom/tests/test_dataset.py::DatasetTests::test_remove_private_tags",
"pydicom/tests/test_dataset.py::DatasetTests::test_reshape_pixel_array_not_implemented",
"pydicom/tests/test_dataset.py::DatasetTests::test_save_as",
"pydicom/tests/test_dataset.py::DatasetTests::test_set_convert_private_elem_from_raw",
"pydicom/tests/test_dataset.py::DatasetTests::test_setitem_slice_raises",
"pydicom/tests/test_dataset.py::DatasetTests::test_top",
"pydicom/tests/test_dataset.py::DatasetTests::test_trait_names",
"pydicom/tests/test_dataset.py::DatasetTests::test_walk",
"pydicom/tests/test_dataset.py::DatasetTests::test_with",
"pydicom/tests/test_dataset.py::DatasetElementsTests::testSequenceAssignment",
"pydicom/tests/test_dataset.py::FileDatasetTests::test_creation_with_container",
"pydicom/tests/test_dataset.py::FileDatasetTests::test_equality_file_meta",
"pydicom/tests/test_filewriter.py::WriteFileTests::testCT",
"pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMR",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan",
"pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testCT",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMR",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMultiPN",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTDose",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTPlan",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testUnicode",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_multivalue_DA",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UN_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_empty_LO",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_unknown_vr_raises",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_pixel_data_not_ow_or_ob",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises",
"pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_empty_value",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_list",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_singleton",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_exception",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_big_endian",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding_unicode",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding",
"pydicom/tests/test_filewriter.py::TestWriteDT::test_format_dt",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_incorrect_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_incorrect_data"
] | [] | MIT License | 2,246 | 1,547 | [
"pydicom/dataset.py",
"pydicom/filewriter.py"
] |
|
pre-commit__pre-commit-718 | ac3a37d1a0e3575bddf23fd9babf6e56202b2988 | 2018-03-03 23:24:53 | ac3a37d1a0e3575bddf23fd9babf6e56202b2988 | diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py
index 83b97cb..9191222 100644
--- a/pre_commit/commands/install_uninstall.py
+++ b/pre_commit/commands/install_uninstall.py
@@ -2,15 +2,19 @@ from __future__ import print_function
from __future__ import unicode_literals
import io
+import logging
import os.path
import sys
from pre_commit import output
+from pre_commit.util import cmd_output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
+logger = logging.getLogger(__name__)
+
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
@@ -36,6 +40,13 @@ def install(
skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
+ if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():
+ logger.error(
+ 'Cowardly refusing to install hooks with `core.hooksPath` set.\n'
+ 'hint: `git config --unset-all core.hooksPath`',
+ )
+ return 1
+
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
| Handle when `core.hooksPath` is set?
As we found in https://github.com/pre-commit/pre-commit-hooks/issues/250, pre-commit (despite being installed) will be silently skipped if `code.hooksPath` is set.
A few options:
- during `pre-commit install`, check this variable and warn
- "" but error
- install into the directory at `core.hooksPath` (but it may be outside the working dir? probably not the best idea to write to it) | pre-commit/pre-commit | diff --git a/tests/commands/install_uninstall_test.py b/tests/commands/install_uninstall_test.py
index a49a3e4..f83708e 100644
--- a/tests/commands/install_uninstall_test.py
+++ b/tests/commands/install_uninstall_test.py
@@ -66,6 +66,14 @@ def test_install_hooks_directory_not_present(tempdir_factory):
assert os.path.exists(runner.pre_commit_path)
+def test_install_refuses_core_hookspath(tempdir_factory):
+ path = git_dir(tempdir_factory)
+ with cwd(path):
+ cmd_output('git', 'config', '--local', 'core.hooksPath', 'hooks')
+ runner = Runner(path, C.CONFIG_FILE)
+ assert install(runner)
+
+
@xfailif_no_symlink
def test_install_hooks_dead_symlink(
tempdir_factory,
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aspy.yaml==1.3.0
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
cfgv==3.3.1
coverage==6.2
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
identify==2.4.4
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
mccabe==0.7.0
mock==5.2.0
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
-e git+https://github.com/pre-commit/pre-commit.git@ac3a37d1a0e3575bddf23fd9babf6e56202b2988#egg=pre_commit
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-env==0.6.2
PyYAML==6.0.1
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: pre-commit
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aspy-yaml==1.3.0
- attrs==22.2.0
- cached-property==1.5.2
- cfgv==3.3.1
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- identify==2.4.4
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- mccabe==0.7.0
- mock==5.2.0
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-env==0.6.2
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/pre-commit
| [
"tests/commands/install_uninstall_test.py::test_install_refuses_core_hookspath"
] | [
"tests/commands/install_uninstall_test.py::test_install_in_submodule_and_run",
"tests/commands/install_uninstall_test.py::test_environment_not_sourced"
] | [
"tests/commands/install_uninstall_test.py::test_is_not_script",
"tests/commands/install_uninstall_test.py::test_is_script",
"tests/commands/install_uninstall_test.py::test_is_previous_pre_commit",
"tests/commands/install_uninstall_test.py::test_install_pre_commit",
"tests/commands/install_uninstall_test.py::test_install_hooks_directory_not_present",
"tests/commands/install_uninstall_test.py::test_install_hooks_dead_symlink",
"tests/commands/install_uninstall_test.py::test_uninstall_does_not_blow_up_when_not_there",
"tests/commands/install_uninstall_test.py::test_uninstall",
"tests/commands/install_uninstall_test.py::test_install_pre_commit_and_run",
"tests/commands/install_uninstall_test.py::test_install_pre_commit_and_run_custom_path",
"tests/commands/install_uninstall_test.py::test_commit_am",
"tests/commands/install_uninstall_test.py::test_unicode_merge_commit_message",
"tests/commands/install_uninstall_test.py::test_install_idempotent",
"tests/commands/install_uninstall_test.py::test_failing_hooks_returns_nonzero",
"tests/commands/install_uninstall_test.py::test_install_existing_hooks_no_overwrite",
"tests/commands/install_uninstall_test.py::test_install_existing_hook_no_overwrite_idempotent",
"tests/commands/install_uninstall_test.py::test_failing_existing_hook_returns_1",
"tests/commands/install_uninstall_test.py::test_install_overwrite_no_existing_hooks",
"tests/commands/install_uninstall_test.py::test_install_overwrite",
"tests/commands/install_uninstall_test.py::test_uninstall_restores_legacy_hooks",
"tests/commands/install_uninstall_test.py::test_replace_old_commit_script",
"tests/commands/install_uninstall_test.py::test_uninstall_doesnt_remove_not_our_hooks",
"tests/commands/install_uninstall_test.py::test_installs_hooks_with_hooks_True",
"tests/commands/install_uninstall_test.py::test_install_hooks_command",
"tests/commands/install_uninstall_test.py::test_installed_from_venv",
"tests/commands/install_uninstall_test.py::test_pre_push_integration_failing",
"tests/commands/install_uninstall_test.py::test_pre_push_integration_accepted",
"tests/commands/install_uninstall_test.py::test_pre_push_new_upstream",
"tests/commands/install_uninstall_test.py::test_pre_push_integration_empty_push",
"tests/commands/install_uninstall_test.py::test_pre_push_legacy",
"tests/commands/install_uninstall_test.py::test_commit_msg_integration_failing",
"tests/commands/install_uninstall_test.py::test_commit_msg_integration_passing",
"tests/commands/install_uninstall_test.py::test_commit_msg_legacy",
"tests/commands/install_uninstall_test.py::test_install_disallow_mising_config",
"tests/commands/install_uninstall_test.py::test_install_allow_mising_config",
"tests/commands/install_uninstall_test.py::test_install_temporarily_allow_mising_config"
] | [] | MIT License | 2,247 | 344 | [
"pre_commit/commands/install_uninstall.py"
] |
|
pytorch__ignite-92 | 0b4aec7629390ed797782fede8e3f11fe7c549f7 | 2018-03-06 16:49:31 | 0b4aec7629390ed797782fede8e3f11fe7c549f7 | jasonkriss: @alykhantejani I just added MeanPairwiseDistance to this PR. | diff --git a/ignite/metrics/__init__.py b/ignite/metrics/__init__.py
index 3902c5de..2cc4e1ff 100644
--- a/ignite/metrics/__init__.py
+++ b/ignite/metrics/__init__.py
@@ -1,3 +1,8 @@
+from .binary_accuracy import BinaryAccuracy
from .categorical_accuracy import CategoricalAccuracy
+from .mean_absolute_error import MeanAbsoluteError
+from .mean_pairwise_distance import MeanPairwiseDistance
from .mean_squared_error import MeanSquaredError
from .metric import Metric
+from .root_mean_squared_error import RootMeanSquaredError
+from .top_k_categorical_accuracy import TopKCategoricalAccuracy
diff --git a/ignite/metrics/binary_accuracy.py b/ignite/metrics/binary_accuracy.py
new file mode 100644
index 00000000..3b33a4db
--- /dev/null
+++ b/ignite/metrics/binary_accuracy.py
@@ -0,0 +1,28 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class BinaryAccuracy(Metric):
+ """
+ Calculates the binary accuracy.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def reset(self):
+ self._num_correct = 0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ correct = torch.eq(torch.round(y_pred).type(torch.LongTensor), y)
+ self._num_correct += torch.sum(correct)
+ self._num_examples += correct.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('BinaryAccuracy must have at least one example before it can be computed')
+ return self._num_correct / self._num_examples
diff --git a/ignite/metrics/mean_absolute_error.py b/ignite/metrics/mean_absolute_error.py
new file mode 100644
index 00000000..88c620de
--- /dev/null
+++ b/ignite/metrics/mean_absolute_error.py
@@ -0,0 +1,28 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class MeanAbsoluteError(Metric):
+ """
+ Calculates the mean absolute error.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def reset(self):
+ self._sum_of_absolute_errors = 0.0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ absolute_errors = torch.abs(y_pred - y.view_as(y_pred))
+ self._sum_of_absolute_errors += torch.sum(absolute_errors)
+ self._num_examples += y.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed')
+ return self._sum_of_absolute_errors / self._num_examples
diff --git a/ignite/metrics/mean_pairwise_distance.py b/ignite/metrics/mean_pairwise_distance.py
new file mode 100644
index 00000000..b18be661
--- /dev/null
+++ b/ignite/metrics/mean_pairwise_distance.py
@@ -0,0 +1,34 @@
+from __future__ import division
+
+import torch
+from torch.nn.functional import pairwise_distance
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class MeanPairwiseDistance(Metric):
+ """
+ Calculates the mean pairwise distance.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def __init__(self, p=2, eps=1e-6):
+ super(MeanPairwiseDistance, self).__init__()
+ self._p = p
+ self._eps = eps
+
+ def reset(self):
+ self._sum_of_distances = 0.0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
+ self._sum_of_distances += torch.sum(distances)
+ self._num_examples += y.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('MeanAbsoluteError must have at least one example before it can be computed')
+ return self._sum_of_distances / self._num_examples
diff --git a/ignite/metrics/root_mean_squared_error.py b/ignite/metrics/root_mean_squared_error.py
new file mode 100644
index 00000000..47e9f396
--- /dev/null
+++ b/ignite/metrics/root_mean_squared_error.py
@@ -0,0 +1,18 @@
+from __future__ import division
+import math
+
+import torch
+
+from .mean_squared_error import MeanSquaredError
+from ignite.exceptions import NotComputableError
+
+
+class RootMeanSquaredError(MeanSquaredError):
+ """
+ Calculates the root mean squared error.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def compute(self):
+ mse = super(RootMeanSquaredError, self).compute()
+ return math.sqrt(mse)
diff --git a/ignite/metrics/top_k_categorical_accuracy.py b/ignite/metrics/top_k_categorical_accuracy.py
new file mode 100644
index 00000000..1ee90353
--- /dev/null
+++ b/ignite/metrics/top_k_categorical_accuracy.py
@@ -0,0 +1,34 @@
+from __future__ import division
+
+import torch
+
+from .metric import Metric
+from ignite.exceptions import NotComputableError
+
+
+class TopKCategoricalAccuracy(Metric):
+ """
+ Calculates the top-k categorical accuracy.
+
+ `update` must receive output of the form (y_pred, y).
+ """
+ def __init__(self, k=5):
+ super(TopKCategoricalAccuracy, self).__init__()
+ self._k = k
+
+ def reset(self):
+ self._num_correct = 0
+ self._num_examples = 0
+
+ def update(self, output):
+ y_pred, y = output
+ sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
+ expanded_y = y.view(-1, 1).expand(-1, self._k)
+ correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
+ self._num_correct += torch.sum(correct)
+ self._num_examples += correct.shape[0]
+
+ def compute(self):
+ if self._num_examples == 0:
+ raise NotComputableError('TopKCategoricalAccuracy must have at least one example before it can be computed')
+ return self._num_correct / self._num_examples
| Add pairwise distance to Metrics
I think in evaluation of regression task, pairwise distance, especially norm-2 distance, as in `torch.nn.functional.pairwise_distance` is at least as frequently used as MSE, which is actually mostly used as loss rather than evaluation metrics. Therefore, I was wondering if it is worthy of being added to Metrics package as a commonly used metrics. | pytorch/ignite | diff --git a/tests/ignite/metrics/test_binary_accuracy.py b/tests/ignite/metrics/test_binary_accuracy.py
new file mode 100644
index 00000000..4e083ec3
--- /dev/null
+++ b/tests/ignite/metrics/test_binary_accuracy.py
@@ -0,0 +1,25 @@
+from ignite.exceptions import NotComputableError
+from ignite.metrics import BinaryAccuracy
+import pytest
+import torch
+
+
+def test_zero_div():
+ acc = BinaryAccuracy()
+ with pytest.raises(NotComputableError):
+ acc.compute()
+
+
+def test_compute():
+ acc = BinaryAccuracy()
+
+ y_pred = torch.FloatTensor([0.2, 0.4, 0.6, 0.8])
+ y = torch.ones(4).type(torch.LongTensor)
+ acc.update((y_pred, y))
+ assert acc.compute() == 0.5
+
+ acc.reset()
+ y_pred = torch.FloatTensor([0.2, 0.7, 0.8, 0.9])
+ y = torch.ones(4).type(torch.LongTensor)
+ acc.update((y_pred, y))
+ assert acc.compute() == 0.75
diff --git a/tests/ignite/metrics/test_mean_absolute_error.py b/tests/ignite/metrics/test_mean_absolute_error.py
new file mode 100644
index 00000000..f2bd60bf
--- /dev/null
+++ b/tests/ignite/metrics/test_mean_absolute_error.py
@@ -0,0 +1,25 @@
+from ignite.exceptions import NotComputableError
+from ignite.metrics import MeanAbsoluteError
+import pytest
+import torch
+
+
+def test_zero_div():
+ mae = MeanAbsoluteError()
+ with pytest.raises(NotComputableError):
+ mae.compute()
+
+
+def test_compute():
+ mae = MeanAbsoluteError()
+
+ y_pred = torch.Tensor([[2.0], [-2.0]])
+ y = torch.zeros(2)
+ mae.update((y_pred, y))
+ assert mae.compute() == 2.0
+
+ mae.reset()
+ y_pred = torch.Tensor([[3.0], [-3.0]])
+ y = torch.zeros(2)
+ mae.update((y_pred, y))
+ assert mae.compute() == 3.0
diff --git a/tests/ignite/metrics/test_mean_pairwise_distance.py b/tests/ignite/metrics/test_mean_pairwise_distance.py
new file mode 100644
index 00000000..45c69fdc
--- /dev/null
+++ b/tests/ignite/metrics/test_mean_pairwise_distance.py
@@ -0,0 +1,26 @@
+from ignite.exceptions import NotComputableError
+from ignite.metrics import MeanPairwiseDistance
+import pytest
+from pytest import approx
+import torch
+
+
+def test_zero_div():
+ mpd = MeanPairwiseDistance()
+ with pytest.raises(NotComputableError):
+ mpd.compute()
+
+
+def test_compute():
+ mpd = MeanPairwiseDistance()
+
+ y_pred = torch.Tensor([[3.0, 4.0], [-3.0, -4.0]])
+ y = torch.zeros(2, 2)
+ mpd.update((y_pred, y))
+ assert mpd.compute() == approx(5.0)
+
+ mpd.reset()
+ y_pred = torch.Tensor([[4.0, 4.0, 4.0, 4.0], [-4.0, -4.0, -4.0, -4.0]])
+ y = torch.zeros(2, 4)
+ mpd.update((y_pred, y))
+ assert mpd.compute() == approx(8.0)
diff --git a/tests/ignite/metrics/test_root_mean_squared_error.py b/tests/ignite/metrics/test_root_mean_squared_error.py
new file mode 100644
index 00000000..69408ced
--- /dev/null
+++ b/tests/ignite/metrics/test_root_mean_squared_error.py
@@ -0,0 +1,25 @@
+from ignite.exceptions import NotComputableError
+from ignite.metrics import RootMeanSquaredError
+import pytest
+import torch
+
+
+def test_zero_div():
+ rmse = RootMeanSquaredError()
+ with pytest.raises(NotComputableError):
+ rmse.compute()
+
+
+def test_compute():
+ rmse = RootMeanSquaredError()
+
+ y_pred = torch.Tensor([[2.0], [-2.0]])
+ y = torch.zeros(2)
+ rmse.update((y_pred, y))
+ assert rmse.compute() == 2.0
+
+ rmse.reset()
+ y_pred = torch.Tensor([[3.0], [-3.0]])
+ y = torch.zeros(2)
+ rmse.update((y_pred, y))
+ assert rmse.compute() == 3.0
diff --git a/tests/ignite/metrics/test_top_k_categorical_accuracy.py b/tests/ignite/metrics/test_top_k_categorical_accuracy.py
new file mode 100644
index 00000000..d9164f9f
--- /dev/null
+++ b/tests/ignite/metrics/test_top_k_categorical_accuracy.py
@@ -0,0 +1,25 @@
+from ignite.exceptions import NotComputableError
+from ignite.metrics import TopKCategoricalAccuracy
+import pytest
+import torch
+
+
+def test_zero_div():
+ acc = TopKCategoricalAccuracy(2)
+ with pytest.raises(NotComputableError):
+ acc.compute()
+
+
+def test_compute():
+ acc = TopKCategoricalAccuracy(2)
+
+ y_pred = torch.FloatTensor([[0.2, 0.4, 0.6, 0.8], [0.8, 0.6, 0.4, 0.2]])
+ y = torch.ones(2).type(torch.LongTensor)
+ acc.update((y_pred, y))
+ assert acc.compute() == 0.5
+
+ acc.reset()
+ y_pred = torch.FloatTensor([[0.4, 0.8, 0.2, 0.6], [0.8, 0.6, 0.4, 0.2]])
+ y = torch.ones(2).type(torch.LongTensor)
+ acc.update((y_pred, y))
+ assert acc.compute() == 1.0
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"numpy",
"mock",
"pytest",
"codecov",
"pytest-cov",
"tqdm",
"scikit-learn",
"visdom",
"torchvision",
"tensorboardX",
"gym"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
cloudpickle==2.2.1
codecov==2.1.13
coverage==6.2
dataclasses==0.8
decorator==4.4.2
enum34==1.1.10
gym==0.26.2
gym-notices==0.0.8
idna==3.10
-e git+https://github.com/pytorch/ignite.git@0b4aec7629390ed797782fede8e3f11fe7c549f7#egg=ignite
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
joblib==1.1.1
jsonpatch==1.32
jsonpointer==2.3
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
networkx==2.5.1
numpy==1.19.5
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
protobuf==4.21.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
requests==2.27.1
scikit-learn==0.24.2
scipy==1.5.4
six==1.17.0
tensorboardX==2.6.2.2
threadpoolctl==3.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
torch==1.10.1
torchvision==0.11.2
tornado==6.1
tqdm==4.64.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
visdom==0.2.4
websocket-client==1.3.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ignite
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- cloudpickle==2.2.1
- codecov==2.1.13
- coverage==6.2
- dataclasses==0.8
- decorator==4.4.2
- enum34==1.1.10
- gym==0.26.2
- gym-notices==0.0.8
- idna==3.10
- importlib-resources==5.4.0
- joblib==1.1.1
- jsonpatch==1.32
- jsonpointer==2.3
- mock==5.2.0
- networkx==2.5.1
- numpy==1.19.5
- pillow==8.4.0
- protobuf==4.21.0
- pytest-cov==4.0.0
- requests==2.27.1
- scikit-learn==0.24.2
- scipy==1.5.4
- six==1.17.0
- tensorboardx==2.6.2.2
- threadpoolctl==3.1.0
- tomli==1.2.3
- torch==1.10.1
- torchvision==0.11.2
- tornado==6.1
- tqdm==4.64.1
- urllib3==1.26.20
- visdom==0.2.4
- websocket-client==1.3.1
prefix: /opt/conda/envs/ignite
| [
"tests/ignite/metrics/test_binary_accuracy.py::test_zero_div",
"tests/ignite/metrics/test_binary_accuracy.py::test_compute",
"tests/ignite/metrics/test_mean_absolute_error.py::test_zero_div",
"tests/ignite/metrics/test_mean_absolute_error.py::test_compute",
"tests/ignite/metrics/test_mean_pairwise_distance.py::test_zero_div",
"tests/ignite/metrics/test_mean_pairwise_distance.py::test_compute",
"tests/ignite/metrics/test_root_mean_squared_error.py::test_zero_div",
"tests/ignite/metrics/test_root_mean_squared_error.py::test_compute",
"tests/ignite/metrics/test_top_k_categorical_accuracy.py::test_zero_div",
"tests/ignite/metrics/test_top_k_categorical_accuracy.py::test_compute"
] | [] | [] | [] | BSD 3-Clause "New" or "Revised" License | 2,260 | 1,688 | [
"ignite/metrics/__init__.py"
] |
marshmallow-code__marshmallow-750 | a867533d53ddbe8cb0ff63c1dc3ca53337ba525c | 2018-03-06 18:29:59 | 8e217c8d6fefb7049ab3389f31a8d35824fa2d96 | diff --git a/marshmallow/decorators.py b/marshmallow/decorators.py
index 8b6df0df..cd850fc0 100644
--- a/marshmallow/decorators.py
+++ b/marshmallow/decorators.py
@@ -107,6 +107,9 @@ def post_dump(fn=None, pass_many=False, pass_original=False):
By default, receives a single object at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
+
+ If ``pass_original=True``, the original data (before serializing) will be passed as
+ an additional argument to the method.
"""
return tag_processor(POST_DUMP, fn, pass_many, pass_original=pass_original)
@@ -129,6 +132,9 @@ def post_load(fn=None, pass_many=False, pass_original=False):
By default, receives a single datum at a time, transparently handling the ``many``
argument passed to the Schema. If ``pass_many=True``, the raw data
(which may be a collection) and the value for ``many`` is passed.
+
+ If ``pass_original=True``, the original data (before deserializing) will be passed as
+ an additional argument to the method.
"""
return tag_processor(POST_LOAD, fn, pass_many, pass_original=pass_original)
diff --git a/marshmallow/schema.py b/marshmallow/schema.py
index 79bb8ee1..904c5322 100644
--- a/marshmallow/schema.py
+++ b/marshmallow/schema.py
@@ -869,8 +869,8 @@ class BaseSchema(base.SchemaABC):
data = utils.if_none(processor(data, many), data)
elif many:
if pass_original:
- data = [utils.if_none(processor(item, original_data), item)
- for item in data]
+ data = [utils.if_none(processor(item, original), item)
+ for item, original in zip(data, original_data)]
else:
data = [utils.if_none(processor(item), item) for item in data]
else:
| post_dump is passing a list of objects as original object
Hi,
I think post_dump with pass_original=True should pass the original object related to the data serialized and not a list of objects which this object belongs to.
``` python
from marshmallow import fields, post_dump, Schema
class DeviceSchema(Schema):
id = fields.String()
@post_dump(pass_original=True)
def __post_dump(self, data, obj):
print(obj) # <-- this is a list
devices = [dict(id=1), dict(id=2)]
DeviceSchema().dump(devices, many=True)
```
In the above example, the parameter `obj` is a list of devices rather than the device object itself.
What do you think?
| marshmallow-code/marshmallow | diff --git a/tests/test_decorators.py b/tests/test_decorators.py
index 226e550f..41a2d44a 100644
--- a/tests/test_decorators.py
+++ b/tests/test_decorators.py
@@ -758,3 +758,80 @@ def test_decorator_error_handling_with_dump(decorator):
schema.dump(object())
assert exc.value.messages == {'foo': 'error'}
schema.load({})
+
+
+class Nested(object):
+ def __init__(self, foo):
+ self.foo = foo
+
+
+class Example(object):
+ def __init__(self, nested):
+ self.nested = nested
+
+
+example = Example(nested=[Nested(x) for x in range(1)])
+
+
[email protected](
+ "data,expected_data,expected_original_data",
+ (
+ [example, {"foo": 0}, example.nested[0]],
+ ),
+)
+def test_decorator_post_dump_with_nested_pass_original_and_pass_many(
+ data, expected_data, expected_original_data):
+
+ class NestedSchema(Schema):
+ foo = fields.Int(required=True)
+
+ @post_dump(pass_many=False, pass_original=True)
+ def check_pass_original_when_pass_many_false(self, data, original_data):
+ assert data == expected_data
+ assert original_data == expected_original_data
+ return data
+
+ @post_dump(pass_many=True, pass_original=True)
+ def check_pass_original_when_pass_many_true(self, data, many, original_data):
+ assert many is True
+ assert data == [expected_data]
+ assert original_data == [expected_original_data]
+ return data
+
+ class ExampleSchema(Schema):
+ nested = fields.Nested(NestedSchema, required=True, many=True)
+
+ schema = ExampleSchema()
+ assert schema.dump(data) == {"nested": [{"foo": 0}]}
+
+
[email protected](
+ "data,expected_data,expected_original_data",
+ (
+ [{"nested": [{"foo": 0}]}, {"foo": 0}, {"foo": 0}],
+ ),
+)
+def test_decorator_post_load_with_nested_pass_original_and_pass_many(
+ data, expected_data, expected_original_data):
+
+ class NestedSchema(Schema):
+ foo = fields.Int(required=True)
+
+ @post_load(pass_many=False, pass_original=True)
+ def check_pass_original_when_pass_many_false(self, data, original_data):
+ assert data == expected_data
+ assert original_data == expected_original_data
+ return data
+
+ @post_load(pass_many=True, pass_original=True)
+ def check_pass_original_when_pass_many_true(self, data, many, original_data):
+ assert many is True
+ assert data == [expected_data]
+ assert original_data == [expected_original_data]
+ return data
+
+ class ExampleSchema(Schema):
+ nested = fields.Nested(NestedSchema, required=True, many=True)
+
+ schema = ExampleSchema()
+ assert schema.load(data) == data
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 2
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[reco]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt",
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==3.5.0
iniconfig==2.1.0
invoke==0.22.0
-e git+https://github.com/marshmallow-code/marshmallow.git@a867533d53ddbe8cb0ff63c1dc3ca53337ba525c#egg=marshmallow
mccabe==0.6.1
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycodestyle==2.3.1
pyflakes==1.6.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.6.1
pytz==2017.3
simplejson==3.13.2
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.12.1
typing_extensions==4.13.0
virtualenv==20.29.3
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==3.5.0
- iniconfig==2.1.0
- invoke==0.22.0
- mccabe==0.6.1
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycodestyle==2.3.1
- pyflakes==1.6.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.6.1
- pytz==2017.3
- simplejson==3.13.2
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.12.1
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_decorators.py::test_decorator_post_dump_with_nested_pass_original_and_pass_many[data0-expected_data0-expected_original_data0]",
"tests/test_decorators.py::test_decorator_post_load_with_nested_pass_original_and_pass_many[data0-expected_data0-expected_original_data0]"
] | [] | [
"tests/test_decorators.py::test_decorated_processors",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_single_no_mutation",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_single_with_mutation",
"tests/test_decorators.py::TestPassOriginal::test_pass_original_many",
"tests/test_decorators.py::test_decorated_processor_inheritance",
"tests/test_decorators.py::test_pre_dump_is_invoked_before_implicit_field_generation",
"tests/test_decorators.py::TestValidatesDecorator::test_validates",
"tests/test_decorators.py::TestValidatesDecorator::test_validates_with_attribute",
"tests/test_decorators.py::TestValidatesDecorator::test_validates_decorator",
"tests/test_decorators.py::TestValidatesDecorator::test_field_not_present",
"tests/test_decorators.py::TestValidatesDecorator::test_precedence",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many_pass_original_and_pass_many[True-expected_data0-expected_original_data0-data0]",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_validator_nested_many_pass_original_and_pass_many[False-expected_data1-expected_original_data1-data0]",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_decorated_validators",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_multiple_validators",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_passing_original_data",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_allow_arbitrary_field_names_in_error",
"tests/test_decorators.py::TestValidatesSchemaDecorator::test_skip_on_field_errors",
"tests/test_decorators.py::test_decorator_error_handling",
"tests/test_decorators.py::test_decorator_error_handling_with_load[pre_load]",
"tests/test_decorators.py::test_decorator_error_handling_with_load[post_load]",
"tests/test_decorators.py::test_decorator_error_handling_with_dump[pre_dump]",
"tests/test_decorators.py::test_decorator_error_handling_with_dump[post_dump]"
] | [] | MIT License | 2,261 | 520 | [
"marshmallow/decorators.py",
"marshmallow/schema.py"
] |
|
wright-group__WrightTools-534 | a11e47d7786f63dcc595c8e9ccf121e73a16407b | 2018-03-06 22:01:33 | a6ff42f2a36f12a92d186a9532f6ec4cfd58d3c0 | pep8speaks: Hello @ksunden! Thanks for submitting the PR.
- In the file [`WrightTools/kit/_array.py`](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/WrightTools/kit/_array.py), following are the PEP8 issues :
> [Line 216:35](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/WrightTools/kit/_array.py#L216): [E712](https://duckduckgo.com/?q=pep8%20E712) comparison to False should be 'if cond is False:' or 'if not cond:'
- In the file [`tests/kit/remove_nans_1D.py`](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/tests/kit/remove_nans_1D.py), following are the PEP8 issues :
> [Line 30:1](https://github.com/wright-group/WrightTools/blob/405cd2cd8b838ac1bbf3b676aaaeb5f7a6de2a3d/tests/kit/remove_nans_1D.py#L30): [E302](https://duckduckgo.com/?q=pep8%20E302) expected 2 blank lines, found 1
ksunden: ```
>>> a = np.array([np.nan, 1, 2, 2])
>>> np.isnan(a)
array([ True, False, False, False])
>>> not np.isnan(a)
Traceback (most recent call last):
File "<input>", line 1, in <module>
not np.isnan(a)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any()
or a.all()
>>> np.isnan(a) == False
array([False, True, True, True])
>>> np.isnan(a) is False
False
```
Hush, pep8speaks, that doesn't work here.... | diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
index 66cfb11..16136f1 100644
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -210,17 +210,10 @@ def remove_nans_1D(*args):
tuple
Tuple of 1D arrays in same order as given, with nan indicies removed.
"""
- # find all indicies to keep
- bads = np.array([])
- for arr in args:
- bad = np.array(np.where(np.isnan(arr))).flatten()
- bads = np.hstack((bad, bads))
- if hasattr(args, 'shape') and len(args.shape) == 1:
- goods = [i for i in np.arange(args.shape[0]) if i not in bads]
- else:
- goods = [i for i in np.arange(len(args[0])) if i not in bads]
- # apply
- return tuple(a[goods] for a in args)
+ vals = np.isnan(args[0])
+ for a in args:
+ vals |= np.isnan(a)
+ return tuple(np.array(a)[vals == False] for a in args)
def share_nans(*arrs):
| remove_nans_1D fails for list
```
>>> wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
Traceback (most recent call last):
File "<input>", line 1, in <module>
wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in rem
ove_nans_1D
return tuple(a[goods] for a in args)
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in <ge
nexpr>
return tuple(a[goods] for a in args)
TypeError: list indices must be integers or slices, not list
>>> wt.kit.remove_nans_1D(np.array([np.nan, 1, 2, 2]))
(array([1., 2., 2.]),)
``` | wright-group/WrightTools | diff --git a/tests/kit/remove_nans_1D.py b/tests/kit/remove_nans_1D.py
old mode 100644
new mode 100755
index 31d15ab..8c09a16
--- a/tests/kit/remove_nans_1D.py
+++ b/tests/kit/remove_nans_1D.py
@@ -1,3 +1,4 @@
+#! /usr/bin/env python3
"""Test remove_nans_1D."""
@@ -18,10 +19,20 @@ def test_simple():
assert wt.kit.remove_nans_1D(arr)[0].all() == np.arange(0, 6, dtype=float).all()
-def test_list():
+def test_multiple():
arrs = [np.random.random(21) for _ in range(5)]
arrs[0][0] = np.nan
arrs[1][-1] = np.nan
arrs = wt.kit.remove_nans_1D(*arrs)
for arr in arrs:
assert arr.size == 19
+
+
+def test_list():
+ assert np.all(wt.kit.remove_nans_1D([np.nan, 1, 2, 3])[0] == np.array([1, 2, 3]))
+
+
+if __name__ == "__main__":
+ test_simple()
+ test_multiple()
+ test_list()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"flake8",
"pydocstyle"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc libfreetype6-dev hdf5-tools libhdf5-dev libopenblas-dev"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
coverage==6.2
cycler==0.11.0
flake8==5.0.4
h5py==3.1.0
imageio==2.15.0
importlib-metadata==4.2.0
iniconfig==1.1.1
kiwisolver==1.3.1
matplotlib==3.3.4
mccabe==0.7.0
numexpr==2.8.1
numpy==1.19.5
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pydocstyle==6.3.0
pyflakes==2.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
scipy==1.5.4
six==1.17.0
snowballstemmer==2.2.0
tidy_headers==1.0.3
tomli==1.2.3
typing_extensions==4.1.1
-e git+https://github.com/wright-group/WrightTools.git@a11e47d7786f63dcc595c8e9ccf121e73a16407b#egg=WrightTools
zipp==3.6.0
| name: WrightTools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- cycler==0.11.0
- flake8==5.0.4
- h5py==3.1.0
- imageio==2.15.0
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- kiwisolver==1.3.1
- matplotlib==3.3.4
- mccabe==0.7.0
- numexpr==2.8.1
- numpy==1.19.5
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pydocstyle==6.3.0
- pyflakes==2.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- scipy==1.5.4
- six==1.17.0
- snowballstemmer==2.2.0
- tidy-headers==1.0.3
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/WrightTools
| [
"tests/kit/remove_nans_1D.py::test_list"
] | [] | [
"tests/kit/remove_nans_1D.py::test_simple",
"tests/kit/remove_nans_1D.py::test_multiple"
] | [] | MIT License | 2,262 | 307 | [
"WrightTools/kit/_array.py"
] |
Backblaze__B2_Command_Line_Tool-420 | 15a60ad1c71b75366061e4f742ef52eb9dcc23e7 | 2018-03-07 02:01:24 | ee2339bd21d21d6140936d58597957250a33fc26 | diff --git a/b2/sync/scan_policies.py b/b2/sync/scan_policies.py
index 198c079..dfb9413 100644
--- a/b2/sync/scan_policies.py
+++ b/b2/sync/scan_policies.py
@@ -27,10 +27,45 @@ class RegexSet(object):
return any(c.match(s) is not None for c in self._compiled_list)
+def convert_dir_regex_to_dir_prefix_regex(dir_regex):
+ """
+ The patterns used to match directory names (and file names) are allowed
+ to match a prefix of the name. This 'feature' was unintentional, but is
+ being retained for compatibility.
+
+ This means that a regex that matches a directory name can't be used directly
+ to match against a file name and test whether the file should be excluded
+ because it matches the directory.
+
+ The pattern 'photos' will match directory names 'photos' and 'photos2',
+ and should exclude files 'photos/kitten.jpg', and 'photos2/puppy.jpg'.
+ It should not exclude 'photos.txt', because there is no directory name
+ that matches.
+
+ On the other hand, the pattern 'photos$' should match 'photos/kitten.jpg',
+ but not 'photos2/puppy.jpg', nor 'photos.txt'
+
+ If the original regex is valid, there are only two cases to consider:
+ either the regex ends in '$' or does not.
+ """
+ if dir_regex.endswith('$'):
+ return dir_regex[:-1] + r'/'
+ else:
+ return dir_regex + r'.*?/'
+
+
class ScanPoliciesManager(object):
"""
Policy object used when scanning folders for syncing, used to decide
which files to include in the list of files to be synced.
+
+ Code that scans through files should at least use should_exclude_file()
+ to decide whether each file should be included; it will check include/exclude
+ patterns for file names, as well as patterns for excluding directeries.
+
+ Code that scans may optionally use should_exclude_directory() to test whether
+ it can skip a directory completely and not bother listing the files and
+ sub-directories in it.
"""
def __init__(
@@ -40,6 +75,9 @@ class ScanPoliciesManager(object):
include_file_regexes=tuple(),
):
self._exclude_dir_set = RegexSet(exclude_dir_regexes)
+ self._exclude_file_because_of_dir_set = RegexSet(
+ map(convert_dir_regex_to_dir_prefix_regex, exclude_dir_regexes)
+ )
self._exclude_file_set = RegexSet(exclude_file_regexes)
self._include_file_set = RegexSet(include_file_regexes)
@@ -51,8 +89,12 @@ class ScanPoliciesManager(object):
being scanned.
:return: True iff excluded.
"""
- return self._exclude_file_set.matches(file_path) and \
- not self._include_file_set.matches(file_path)
+ exclude_because_of_dir = self._exclude_file_because_of_dir_set.matches(file_path)
+ exclude_because_of_file = (
+ self._exclude_file_set.matches(file_path) and
+ not self._include_file_set.matches(file_path)
+ )
+ return exclude_because_of_dir or exclude_because_of_file
def should_exclude_directory(self, dir_path):
"""
| --excludeDirRegex does not work when source is B2
The new filtering that lets you exclude an entire directory works in the `LocalFolder` class, but not the `B2Folder` class.
I think there are two possible approaches to fixing it: (1) change B2Folder to simulate the existence of directories, and check them for exclusion, or (2) extend `ScanPoliciesManager.should_exclude_file` to also test whether any of the directories in the path are excluded. I like #2, but I think it would need optimization to avoid checking every parent directory of every file. | Backblaze/B2_Command_Line_Tool | diff --git a/test/test_scan_policies.py b/test/test_scan_policies.py
index f3bb797..853730d 100644
--- a/test/test_scan_policies.py
+++ b/test/test_scan_policies.py
@@ -30,8 +30,20 @@ class TestScanPolicies(TestBase):
def test_exclude_dir(self):
policy = ScanPoliciesManager(
- include_file_regexes=['.*[.]txt$'], exclude_dir_regexes=['alfa$']
+ include_file_regexes=['.*[.]txt$'], exclude_dir_regexes=['alfa', 'bravo$']
)
self.assertTrue(policy.should_exclude_directory('alfa'))
- self.assertFalse(policy.should_exclude_directory('alfa2'))
- self.assertFalse(policy.should_exclude_directory('alfa/hello'))
+ self.assertTrue(policy.should_exclude_directory('alfa2'))
+ self.assertTrue(policy.should_exclude_directory('alfa/hello'))
+
+ self.assertTrue(policy.should_exclude_directory('bravo'))
+ self.assertFalse(policy.should_exclude_directory('bravo2'))
+ self.assertFalse(policy.should_exclude_directory('bravo/hello'))
+
+ self.assertTrue(policy.should_exclude_file('alfa/foo'))
+ self.assertTrue(policy.should_exclude_file('alfa2/hello/foo'))
+ self.assertTrue(policy.should_exclude_file('alfa/hello/foo.txt'))
+
+ self.assertTrue(policy.should_exclude_file('bravo/foo'))
+ self.assertFalse(policy.should_exclude_file('bravo2/hello/foo'))
+ self.assertTrue(policy.should_exclude_file('bravo/hello/foo.txt'))
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | arrow==0.12.0
attrs==22.2.0
-e git+https://github.com/Backblaze/B2_Command_Line_Tool.git@15a60ad1c71b75366061e4f742ef52eb9dcc23e7#egg=b2
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
logfury==1.0.1
nose==1.3.7
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
requests==2.27.1
six==1.17.0
tomli==1.2.3
tqdm==4.64.1
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: B2_Command_Line_Tool
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- arrow==0.12.0
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- logfury==1.0.1
- nose==1.3.7
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- requests==2.27.1
- six==1.17.0
- tomli==1.2.3
- tqdm==4.64.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/B2_Command_Line_Tool
| [
"test/test_scan_policies.py::TestScanPolicies::test_exclude_dir"
] | [] | [
"test/test_scan_policies.py::TestScanPolicies::test_default",
"test/test_scan_policies.py::TestScanPolicies::test_exclude_include"
] | [] | MIT License | 2,263 | 775 | [
"b2/sync/scan_policies.py"
] |
|
pika__pika-991 | 16cdb80b4c0aacc9766abf033fccecb2c1ccb1a3 | 2018-03-07 18:25:08 | 4c904dea651caaf2a54b0fca0b9e908dec18a4f8 | vitaly-krugl: @lukebakken, I haven't forgotten - will catch up in the next couple of days.
lukebakken: @vitaly-krugl no hurry at all! Thanks again.
lukebakken: @vitaly-krugl the `queue.declare` method never makes it to RabbitMQ. An `AssertionError` is thrown [here](https://github.com/pika/pika/blob/master/pika/spec.py#L1003-L1004) which gums up the works when the `with` clause tries to exit.
vitaly-krugl: Would this alone punch at the heart of the problem [here](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1382)?
```
try:
self._send_method(method)
except Exception:
self._blocking = None
raise
```
lukebakken: I'll try that out.
vitaly-krugl: Don't try it out just yet, I missed something there (and the fix in the PR did, too, I think)
vitaly-krugl: The [if acceptable_replies:](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1361) block sets `self._blocking` and also registers a number of callbacks. If `self._send_method(method)` (as in this case), we really don't want any of those registered callbacks to remain there either.
Since a successful `self._send_method(method)` call will ultimately just enqueue some data on the output write buffer, it should be possible to move the (https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1361) block after [self._send_method(method)](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/channel.py#L1382).
Furthermore, to ensure that an incomplete message doesn't get placed in the output buffer (due to marshaling failure of one of its subframes), [Connection._send_message()](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/connection.py#L2273) needs to be modified to pre-marshal all of its parts and then append them to the output frame buffer only after all marshaling is done, updating the stats and finally calling `self._flush_outbound() ` and `self._detect_backpressure()` like [Connection._send_frame()](https://github.com/pika/pika/blob/b7f27983cfbcbaf34a06b6fc9259a7fd50b8838d/pika/connection.py#L2251-L2257). To this end, `Connection._send_message()` and `Connection._send_frame()` should share a method (e.g., `Connection._output_marshaled_frame()` that updates `self.bytes_sent` and `self.frames_sent` and appends the marshaled frame data to `self.outbound_buffer` .
lukebakken: @vitaly-krugl - ready for re-review. Thanks!
lukebakken: @vitaly-krugl - I have merged in the tests you provided and this is ready for another review. Thanks!
vitaly-krugl: @lukebakken, I renamed this PR "Request marshaling error should not corrupt a channel", which reflects issues #990 and #912 more accurately.
vitaly-krugl: On broker's Channel.Close, the draining is necessary because ANQP says to
ignore all incoming requests after channel is closed except
Channel.Close. So, draining in that case helps break the gridlock.
However, in the case the client is closing the channel with some blocking
requests still pending normally, we have a perfectly healthy channel and
nothing special is needed. The normal course of events will see it through.
On Tue, Apr 10, 2018, 5:00 AM Luke Bakken <[email protected]> wrote:
> *@lukebakken* commented on this pull request.
> ------------------------------
>
> In pika/channel.py
> <https://github.com/pika/pika/pull/991#discussion_r180392933>:
>
> > @@ -1327,9 +1327,10 @@ def _on_synchronous_complete(self, _method_frame_unused):
> while self._blocked and self._blocking is None:
> self._rpc(*self._blocked.popleft())
>
> - def _drain_blocked_methods_on_remote_close(self):
>
> If we think we need an "emergency channel-close" method that purges
> queued-up requests (I don't think we do
>
> Draining blocked methods on a broker-initiated close was introduced in
> #957 <https://github.com/pika/pika/pull/957> - please check that PR out
> again. I still think it's necessary.
>
> —
> You are receiving this because you were mentioned.
> Reply to this email directly, view it on GitHub
> <https://github.com/pika/pika/pull/991#discussion_r180392933>, or mute
> the thread
> <https://github.com/notifications/unsubscribe-auth/ABX9KigMj1hv6PIavaR70oqFZk9LqB0iks5tnJ7igaJpZM4Sg71J>
> .
>
vitaly-krugl: @lukebakken, is this PR ready for re-review?
vitaly-krugl: I think I might not have ended that review ??
On Mon, Apr 16, 2018, 6:22 AM Luke Bakken <[email protected]> wrote:
> @vitaly-krugl <https://github.com/vitaly-krugl> if I re-select your name
> in the "Reviewers" dropdown, the status icon changes back to an orange disk
> ... do you not get a new email saying I re-requested a review? I assumed
> that you did. If you don't get an email, I can @-mention you in a comment.
> Thanks for the re-re-reviews 😄
>
> —
> You are receiving this because you were mentioned.
> Reply to this email directly, view it on GitHub
> <https://github.com/pika/pika/pull/991#issuecomment-381597285>, or mute
> the thread
> <https://github.com/notifications/unsubscribe-auth/ABX9KmC9Xn-wJn2vurcgpPYZe9kJuVEtks5tpJsrgaJpZM4Sg71J>
> .
>
lukebakken: @vitaly-krugl thanks! Sorry I missed the previous comment about that test.
lukebakken: @vitaly-krugl - changes made. I'll merge this once builds complete. Thanks! | diff --git a/pika/channel.py b/pika/channel.py
index fb67a0d..282f53c 100644
--- a/pika/channel.py
+++ b/pika/channel.py
@@ -1347,7 +1347,7 @@ class Channel(object):
sent, and thus its completion callback would never be called.
"""
- LOGGER.debug('Draining %i blocked frames due to remote Channel.Close',
+ LOGGER.debug('Draining %i blocked frames due to broker-requested Channel.Close',
len(self._blocked))
while self._blocked:
method = self._blocked.popleft()[0]
@@ -1408,6 +1408,12 @@ class Channel(object):
self._blocked.append([method, callback, acceptable_replies])
return
+ # Note: _send_method can throw exceptions if there are framing errors
+ # or invalid data passed in. Call it here to prevent self._blocking
+ # from being set if an exception is thrown. This also prevents
+ # acceptable_replies registering callbacks when exceptions are thrown
+ self._send_method(method)
+
# If acceptable replies are set, add callbacks
if acceptable_replies:
# Block until a response frame is received for synchronous frames
@@ -1430,8 +1436,6 @@ class Channel(object):
self.callbacks.add(self.channel_number, reply, callback,
arguments=arguments)
- self._send_method(method)
-
def _raise_if_not_open(self):
"""If channel is not in the OPEN state, raises ChannelClosed with
`reply_code` and `reply_text` corresponding to current state. If channel
diff --git a/pika/connection.py b/pika/connection.py
index be2b1bc..271b198 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -2306,11 +2306,7 @@ class Connection(pika.compat.AbstractBase):
'Attempted to send a frame on closed connection.')
marshaled_frame = frame_value.marshal()
- self.bytes_sent += len(marshaled_frame)
- self.frames_sent += 1
- self._adapter_emit_data(marshaled_frame)
- if self.params.backpressure_detection:
- self._detect_backpressure()
+ self._output_marshaled_frames([marshaled_frame])
def _send_method(self, channel_number, method, content=None):
"""Constructs a RPC method frame and then sends it to the broker.
@@ -2336,8 +2332,14 @@ class Connection(pika.compat.AbstractBase):
"""
length = len(content[1])
- self._send_frame(frame.Method(channel_number, method_frame))
- self._send_frame(frame.Header(channel_number, length, content[0]))
+ marshaled_body_frames = []
+
+ # Note: we construct the Method, Header and Content objects, marshal them
+ # *then* output in case the marshaling operation throws an exception
+ frame_method = frame.Method(channel_number, method_frame)
+ frame_header = frame.Header(channel_number, length, content[0])
+ marshaled_body_frames.append(frame_method.marshal())
+ marshaled_body_frames.append(frame_header.marshal())
if content[1]:
chunks = int(math.ceil(float(length) / self._body_max_length))
@@ -2346,7 +2348,10 @@ class Connection(pika.compat.AbstractBase):
end = start + self._body_max_length
if end > length:
end = length
- self._send_frame(frame.Body(channel_number, content[1][start:end]))
+ frame_body = frame.Body(channel_number, content[1][start:end])
+ marshaled_body_frames.append(frame_body.marshal())
+
+ self._output_marshaled_frames(marshaled_body_frames)
def _set_connection_state(self, connection_state):
"""Set the connection state.
@@ -2382,3 +2387,16 @@ class Connection(pika.compat.AbstractBase):
"""
self._frame_buffer = self._frame_buffer[byte_count:]
self.bytes_received += byte_count
+
+ def _output_marshaled_frames(self, marshaled_frames):
+ """Output list of marshaled frames to buffer and update stats
+
+ :param list marshaled_frames: A list of frames marshaled to bytes
+
+ """
+ for marshaled_frame in marshaled_frames:
+ self.bytes_sent += len(marshaled_frame)
+ self.frames_sent += 1
+ self._adapter_emit_data(marshaled_frame)
+ if self.params.backpressure_detection:
+ self._detect_backpressure()
| BlockingChannel.queue_declare hanging on non-string queue parameters
Under Python 3.6.4 and Pika 0.11.2, the `BlockingChannel.queue_declare` method hangs when setting its `queue` parameter to a value that is not of `str` type (e.g., `int`, `bool`, `list`, `dict`, `tuple`, `NoneType`).
Input:
```
$ python3 <<EOF
import pika
with pika.BlockingConnection() as connection:
channel = connection.channel()
channel.queue_declare(queue=[1, 2, 3])
EOF
``` | pika/pika | diff --git a/tests/acceptance/async_adapter_tests.py b/tests/acceptance/async_adapter_tests.py
index 2a51ae1..967e109 100644
--- a/tests/acceptance/async_adapter_tests.py
+++ b/tests/acceptance/async_adapter_tests.py
@@ -625,8 +625,9 @@ class TestExchangeRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
raise AssertionError("Should not have received an Exchange.DeclareOk")
-class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters):
- DESCRIPTION = "should close channel: declare passive exchange with close"
+class TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker(
+ AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "No deadlock when closing a channel with pending blocked requests and concurrent Channel.Close from broker."
# To observe the behavior that this is testing, comment out this line
# in pika/channel.py - _on_close:
@@ -636,10 +637,12 @@ class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters
# With the above line commented out, this test will hang
def begin(self, channel):
- self.name = self.__class__.__name__ + ':' + uuid.uuid1().hex
+ base_exch_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
self.channel.add_on_close_callback(self.on_channel_closed)
for i in range(0, 99):
- exch_name = self.name + ':' + str(i)
+ # Passively declare a non-existent exchange to force Channel.Close
+ # from broker
+ exch_name = base_exch_name + ':' + str(i)
cb = functools.partial(self.on_bad_result, exch_name)
channel.exchange_declare(exch_name,
exchange_type='direct',
@@ -648,15 +651,49 @@ class TestPassiveExchangeDeclareWithConcurrentClose(AsyncTestCase, AsyncAdapters
channel.close()
def on_channel_closed(self, channel, reply_code, reply_text):
+ # The close is expected because the requested exchange doesn't exist
self.stop()
def on_bad_result(self, exch_name, frame):
- self.channel.exchange_delete(exch_name)
- raise AssertionError("Should not have received an Exchange.DeclareOk")
+ self.fail("Should not have received an Exchange.DeclareOk")
-class TestQueueDeclareAndDelete(AsyncTestCase, AsyncAdapters):
- DESCRIPTION = "Create and delete a queue"
+class TestClosingAChannelPermitsBlockedRequestToComplete(AsyncTestCase,
+ AsyncAdapters):
+ DESCRIPTION = "Closing a channel permits blocked requests to complete."
+
+ def begin(self, channel):
+ self._queue_deleted = False
+
+ channel.add_on_close_callback(self.on_channel_closed)
+
+ q_name = self.__class__.__name__ + ':' + uuid.uuid1().hex
+ # NOTE we pass callback to make it a blocking request
+ channel.queue_declare(q_name,
+ exclusive=True,
+ callback=lambda _frame: None)
+
+ self.assertIsNotNone(channel._blocking)
+
+ # The Queue.Delete should block on completion of Queue.Declare
+ channel.queue_delete(q_name, callback=self.on_queue_deleted)
+ self.assertTrue(channel._blocked)
+
+ # This Channel.Close should allow the blocked Queue.Delete to complete
+ # Before closing the channel
+ channel.close()
+
+ def on_queue_deleted(self, _frame):
+ # Getting this callback shows that the blocked request was processed
+ self._queue_deleted = True
+
+ def on_channel_closed(self, _channel, _reply_code, _reply_text):
+ self.assertTrue(self._queue_deleted)
+ self.stop()
+
+
+class TestQueueUnnamedDeclareAndDelete(AsyncTestCase, AsyncAdapters):
+ DESCRIPTION = "Create and delete an unnamed queue"
def begin(self, channel):
channel.queue_declare(queue='',
@@ -673,11 +710,11 @@ class TestQueueDeclareAndDelete(AsyncTestCase, AsyncAdapters):
def on_queue_delete(self, frame):
self.assertIsInstance(frame.method, spec.Queue.DeleteOk)
+ # NOTE: with event loops that suppress exceptions from callbacks
self.stop()
-
-class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
+class TestQueueNamedDeclareAndDelete(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Create and delete a named queue"
def begin(self, channel):
@@ -701,7 +738,6 @@ class TestQueueNameDeclareAndDelete(AsyncTestCase, AsyncAdapters):
self.stop()
-
class TestQueueRedeclareWithDifferentValues(AsyncTestCase, AsyncAdapters):
DESCRIPTION = "Should close chan: re-declared queue w/ diff params"
@@ -745,7 +781,6 @@ class TestTX1_Select(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
self.stop()
-
class TestTX2_Commit(AsyncTestCase, AsyncAdapters): # pylint: disable=C0103
DESCRIPTION = "Start a transaction, and commit it"
diff --git a/tests/acceptance/blocking_adapter_test.py b/tests/acceptance/blocking_adapter_test.py
index d79ded2..d0ed48e 100644
--- a/tests/acceptance/blocking_adapter_test.py
+++ b/tests/acceptance/blocking_adapter_test.py
@@ -50,7 +50,6 @@ def setUpModule():
logging.basicConfig(level=logging.DEBUG)
-#@unittest.skip('SKIPPING WHILE DEBUGGING SOME CHANGES. DO NOT MERGE LIKE THIS')
class BlockingTestCaseBase(unittest.TestCase):
TIMEOUT = DEFAULT_TIMEOUT
@@ -355,6 +354,16 @@ class TestCreateAndCloseConnectionWithChannelAndConsumer(BlockingTestCaseBase):
self.assertFalse(ch._impl._consumers)
+class TestUsingInvalidQueueArgument(BlockingTestCaseBase):
+ def test(self):
+ """BlockingConnection raises expected exception when invalid queue parameter is used
+ """
+ connection = self._connect()
+ ch = connection.channel()
+ with self.assertRaises(AssertionError):
+ ch.queue_declare(queue=[1, 2, 3])
+
+
class TestSuddenBrokerDisconnectBeforeChannel(BlockingTestCaseBase):
def test(self):
diff --git a/tests/unit/channel_tests.py b/tests/unit/channel_tests.py
index 10e594e..dc353ef 100644
--- a/tests/unit/channel_tests.py
+++ b/tests/unit/channel_tests.py
@@ -1587,3 +1587,18 @@ class ChannelTests(unittest.TestCase):
self.assertRaises(TypeError,
self.obj._validate_rpc_completion_callback,
'foo')
+
+ def test_no_side_effects_from_send_method_error(self):
+ self.obj._set_state(self.obj.OPEN)
+
+ self.assertIsNone(self.obj._blocking)
+
+ with mock.patch.object(self.obj.callbacks, 'add') as cb_add_mock:
+ with mock.patch.object(self.obj, '_send_method',
+ side_effect=TypeError) as send_method_mock:
+ with self.assertRaises(TypeError):
+ self.obj.queue_delete('', callback=lambda _frame: None)
+
+ self.assertEqual(send_method_mock.call_count, 1)
+ self.assertIsNone(self.obj._blocking)
+ self.assertEqual(cb_add_mock.call_count, 0)
diff --git a/tests/unit/connection_tests.py b/tests/unit/connection_tests.py
index 19df873..04fd543 100644
--- a/tests/unit/connection_tests.py
+++ b/tests/unit/connection_tests.py
@@ -983,3 +983,31 @@ class ConnectionTests(unittest.TestCase): # pylint: disable=R0904
# Make sure _detect_backpressure doesn't throw
self.connection._detect_backpressure()
+
+
+ def test_no_side_effects_from_message_marshal_error(self):
+ # Verify that frame buffer is empty on entry
+ self.assertEqual(b'', self.connection._frame_buffer)
+
+ # Use Basic.Public with invalid body to trigger marshalling error
+ method = spec.Basic.Publish()
+ properties = spec.BasicProperties()
+ # Verify that marshalling of method and header won't trigger error
+ frame.Method(1, method).marshal()
+ frame.Header(1, body_size=10, props=properties).marshal()
+ # Create bogus body that should trigger an error during marshalling
+ body = [1,2,3,4]
+ # Verify that frame body can be created using the bogus body, but
+ # that marshalling will fail
+ frame.Body(1, body)
+ with self.assertRaises(TypeError):
+ frame.Body(1, body).marshal()
+
+ # Now, attempt to send the method with the bogus body
+ with self.assertRaises(TypeError):
+ self.connection._send_method(channel_number=1,
+ method=method,
+ content=(properties, body))
+
+ # Now make sure that nothing is enqueued on frame buffer
+ self.assertEqual(b'', self.connection._frame_buffer)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-asyncio",
"coverage",
"codecov",
"mock",
"tornado",
"twisted"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
Automat==24.8.1
certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
constantly==23.10.4
coverage==7.8.0
exceptiongroup==1.2.2
hyperlink==21.0.0
idna==3.10
incremental==24.7.2
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
-e git+https://github.com/pika/pika.git@16cdb80b4c0aacc9766abf033fccecb2c1ccb1a3#egg=pika
pluggy==1.5.0
pytest==8.3.5
pytest-asyncio==0.26.0
requests==2.32.3
tomli==2.2.1
tornado==6.4.2
Twisted==24.11.0
typing_extensions==4.13.0
urllib3==2.3.0
zope.interface==7.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- automat==24.8.1
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- constantly==23.10.4
- coverage==7.8.0
- exceptiongroup==1.2.2
- hyperlink==21.0.0
- idna==3.10
- incremental==24.7.2
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- requests==2.32.3
- tomli==2.2.1
- tornado==6.4.2
- twisted==24.11.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- zope-interface==7.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/channel_tests.py::ChannelTests::test_no_side_effects_from_send_method_error",
"tests/unit/connection_tests.py::ConnectionTests::test_no_side_effects_from_message_marshal_error"
] | [
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestA_Connect::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestConstructAndImmediatelyCloseConnection::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCloseConnectionDuringAMQPHandshake::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestSocketConnectTimeoutWithTinySocketTimeout::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestStackConnectionTimeoutWithTinyStackTimeout::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaDefaultConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionViaCustomConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionMultipleConfigsDefaultConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionRetriesWithDefaultConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionConnectionWorkflowSocketConnectionFailure::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAMQPHandshakeTimesOutDefaultWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndImmediatelyAbortDefaultConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestCreateConnectionAndAsynchronouslyAbortDefaultConnectionWorkflow::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestConfirmSelect::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestBlockingNonBlockingBlockingRPCWontStall::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestConsumeCancel::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestExchangeDeclareAndDelete::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestExchangeRedeclareWithDifferentValues::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestNoDeadlockWhenClosingChannelWithPendingBlockedRequestsAndConcurrentChannelCloseFromBroker::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestClosingAChannelPermitsBlockedRequestToComplete::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestQueueUnnamedDeclareAndDelete::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestQueueNamedDeclareAndDelete::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestQueueRedeclareWithDifferentValues::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestTX1_Select::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestTX2_Commit::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestTX2_CommitFailure::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestTX3_Rollback::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestTX3_RollbackFailure::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsume::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndConsumeBig::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestZ_PublishAndGet::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestZ_AccessDenied::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionTimesOut::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestBlockedConnectionUnblocks::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeRequestBeforeIOLoopStarts::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromIOLoopThread::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestAddCallbackThreadsafeFromAnotherThread::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestIOLoopStopBeforeIOLoopStarts::test_with_tornado",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_asyncio",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_select_default",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_select_epoll",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_select_poll",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_select_select",
"tests/acceptance/async_adapter_tests.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test_with_tornado",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateConnectionWithNoneSocketAndStackTimeouts::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateConnectionFromTwoConfigsFirstUnreachable::test",
"tests/acceptance/blocking_adapter_test.py::TestMultiCloseConnectionRaisesWrongState::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerExitSurvivesClosedConnection::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesOriginalException::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionContextManagerClosesConnectionAndPassesSystemException::test",
"tests/acceptance/blocking_adapter_test.py::TestLostConnectionResultsInIsClosedConnectionAndChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestInvalidExchangeTypeRaisesConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseConnectionWithChannelAndConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestUsingInvalidQueueArgument::test",
"tests/acceptance/blocking_adapter_test.py::TestSuddenBrokerDisconnectBeforeChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAccessToFileDescriptorAfterConnectionClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionStart::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionTune::test",
"tests/acceptance/blocking_adapter_test.py::TestProcessDataEvents::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionRegisterForBlockAndUnblock::test",
"tests/acceptance/blocking_adapter_test.py::TestBlockedConnectionTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromSameThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddCallbackThreadsafeFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestAddTimeoutRemoveTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestViabilityOfMultipleTimeoutsWithSameDeadlineAndCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestRemoveTimeoutFromTimeoutCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestSleep::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectionProperties::test",
"tests/acceptance/blocking_adapter_test.py::TestCreateAndCloseChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestExchangeBindAndUnbind::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueDeclareAndDelete::test",
"tests/acceptance/blocking_adapter_test.py::TestPassiveQueueDeclareOfUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestQueueBindAndUnbindAndPurge::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicGet::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicReject::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRejectNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNack::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackNoRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicNackMultiple::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicRecoverWithRequeue::test",
"tests/acceptance/blocking_adapter_test.py::TestTxCommit::test",
"tests/acceptance/blocking_adapter_test.py::TestTxRollback::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeFromUnknownQueueRaisesChannelClosed::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndBasicPublishWithPubacksUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestConfirmDeliveryAfterUnroutableMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessagesReturnedInNonPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestUnroutableMessageReturnedInPubackMode::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishDeliveredWhenPendingUnroutable::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishAndConsumeWithPubacksAndQosOfOne::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicConsumeWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorWithAckFromAnotherThread::test",
"tests/acceptance/blocking_adapter_test.py::TestTwoBasicConsumersOnSameChannel::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelPurgesPendingConsumerCancellationEvt::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicPublishWithoutPubacks::test",
"tests/acceptance/blocking_adapter_test.py::TestPublishFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestStopConsumingFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseChannelFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestCloseConnectionFromBasicConsumeCallback::test",
"tests/acceptance/blocking_adapter_test.py::TestStartConsumingRaisesChannelClosedOnSameChannelFailure::test",
"tests/acceptance/blocking_adapter_test.py::TestStartConsumingReturnsAfterCancelFromBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubAckPublishAndConsumeHugeMessage::test",
"tests/acceptance/blocking_adapter_test.py::TestNonPubAckPublishAndConsumeManyMessages::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithNonAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestBasicCancelWithAckableConsumer::test",
"tests/acceptance/blocking_adapter_test.py::TestUnackedMessageAutoRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestNoAckMessageNotRestoredToQueueOnChannelClose::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorInactivityTimeout::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorInterruptedByCancelFromBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorCancelEncountersCancelFromBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestConsumeGeneratorPassesChannelClosedOnSameChannelFailure::test",
"tests/acceptance/blocking_adapter_test.py::TestChannelFlow::test"
] | [
"tests/acceptance/blocking_adapter_test.py::TestCreateConnectionFromTwoUnreachableConfigs::test",
"tests/acceptance/blocking_adapter_test.py::TestConnectWithDownedBroker::test",
"tests/acceptance/blocking_adapter_test.py::TestDisconnectDuringConnectionProtocol::test",
"tests/unit/channel_tests.py::ChannelTests::test_add_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_callback_multiple_replies",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_cancel_empty_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_basic_get_empty_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_close_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_callbacks_channel_flow_added",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_cancel_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_close_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_flow_callback",
"tests/unit/channel_tests.py::ChannelTests::test_add_on_return_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_ack_calls_send_method",
"tests/unit/channel_tests.py::ChannelTests::test_basic_ack_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_asynch",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_asynch_with_user_callback_raises_value_error",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_calls_raise_if_not_open",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_synch",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_synch_no_user_callback_raises_value_error",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_then_close",
"tests/unit/channel_tests.py::ChannelTests::test_basic_cancel_unknown_consumer_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_calls_raise_if_not_open",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_cancelled_full",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_in_consumers",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_no_completion_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumer_tag_with_completion_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_callback_value",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_rpc_with_completion_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_consumers_rpc_with_no_completion_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_consume_duplicate_consumer_tag_raises",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_calls_require_callback",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_send_method_called",
"tests/unit/channel_tests.py::ChannelTests::test_basic_get_send_method_called_auto_ack",
"tests/unit/channel_tests.py::ChannelTests::test_basic_nack_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_nack_send_method_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_publish_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_publish_send_method_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_invalid_prefetch_count_raises_error",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_invalid_prefetch_size_raises_error",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_qos_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_recover_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_recover_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_send_method_request_with_int_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_send_method_request_with_long_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_spec_with_int_tag",
"tests/unit/channel_tests.py::ChannelTests::test_basic_reject_spec_with_long_tag",
"tests/unit/channel_tests.py::ChannelTests::test_channel_open_add_callbacks_called",
"tests/unit/channel_tests.py::ChannelTests::test_cleanup",
"tests/unit/channel_tests.py::ChannelTests::test_close_basic_cancel_called",
"tests/unit/channel_tests.py::ChannelTests::test_close_in_closed_state_raises_channel_error_and_stays_closed",
"tests/unit/channel_tests.py::ChannelTests::test_close_in_closing_state_raises_already_closing",
"tests/unit/channel_tests.py::ChannelTests::test_close_in_open_state_transitions_to_closing",
"tests/unit/channel_tests.py::ChannelTests::test_close_in_opening_state",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_async",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_ack",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_basic_nack",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_without_nowait_selectok",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_yes_basic_ack_callback",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_callback_yes_basic_nack_callback",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_no_callback_callback_call_count",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_confirms",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_raises_method_not_implemented_for_nack",
"tests/unit/channel_tests.py::ChannelTests::test_confirm_delivery_with_bad_callback_raises_value_error",
"tests/unit/channel_tests.py::ChannelTests::test_consumer_tags",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_bind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_declare_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_delete_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_exchange_unbind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_flow_off_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_flow_on_flowok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_flow_on_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_flow_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_flow_raises_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_deliver_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_get_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_basic_return_called",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_method_returns_none",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_header_frame",
"tests/unit/channel_tests.py::ChannelTests::test_handle_content_frame_sets_method_frame",
"tests/unit/channel_tests.py::ChannelTests::test_has_content_false",
"tests/unit/channel_tests.py::ChannelTests::test_has_content_true",
"tests/unit/channel_tests.py::ChannelTests::test_immediate_called_logger_warning",
"tests/unit/channel_tests.py::ChannelTests::test_init_blocked",
"tests/unit/channel_tests.py::ChannelTests::test_init_blocking",
"tests/unit/channel_tests.py::ChannelTests::test_init_callbacks",
"tests/unit/channel_tests.py::ChannelTests::test_init_cancelled",
"tests/unit/channel_tests.py::ChannelTests::test_init_channel_number",
"tests/unit/channel_tests.py::ChannelTests::test_init_connection",
"tests/unit/channel_tests.py::ChannelTests::test_init_consumers",
"tests/unit/channel_tests.py::ChannelTests::test_init_content_frame_assembler",
"tests/unit/channel_tests.py::ChannelTests::test_init_flow",
"tests/unit/channel_tests.py::ChannelTests::test_init_has_on_flow_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_invalid_channel_number",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_flowok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_getok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_on_openok_callback",
"tests/unit/channel_tests.py::ChannelTests::test_init_state",
"tests/unit/channel_tests.py::ChannelTests::test_is_closed_false",
"tests/unit/channel_tests.py::ChannelTests::test_is_closed_true",
"tests/unit/channel_tests.py::ChannelTests::test_is_closing_false",
"tests/unit/channel_tests.py::ChannelTests::test_is_closing_true",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancel_not_appended_cancelled",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancel_removed_consumer",
"tests/unit/channel_tests.py::ChannelTests::test_on_cancelok_removed_consumer",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_from_broker_in_closing_state",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_from_broker_in_open_state",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_from_broker_warning",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_meta_in_closed_state_is_suppressed",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_meta_in_closing_state_transitions_to_closed",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_meta_in_open_state_transitions_to_closed",
"tests/unit/channel_tests.py::ChannelTests::test_on_close_meta_in_opening_state_transitions_to_closed",
"tests/unit/channel_tests.py::ChannelTests::test_on_closeok",
"tests/unit/channel_tests.py::ChannelTests::test_on_closeok_following_close_from_broker",
"tests/unit/channel_tests.py::ChannelTests::test_on_confirm_selectok",
"tests/unit/channel_tests.py::ChannelTests::test_on_deliver_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_eventok",
"tests/unit/channel_tests.py::ChannelTests::test_on_flow",
"tests/unit/channel_tests.py::ChannelTests::test_on_flow_with_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok_callback_reset",
"tests/unit/channel_tests.py::ChannelTests::test_on_flowok_calls_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_getempty",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_callback_reset",
"tests/unit/channel_tests.py::ChannelTests::test_on_getok_no_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_openok_callback_called",
"tests/unit/channel_tests.py::ChannelTests::test_on_openok_no_callback",
"tests/unit/channel_tests.py::ChannelTests::test_on_synchronous_complete",
"tests/unit/channel_tests.py::ChannelTests::test_onreturn",
"tests/unit/channel_tests.py::ChannelTests::test_onreturn_warning",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_bind_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_declare_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_delete_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_queue_purge_rpc_request_nowait",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_raises_value_error_on_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_queue_unbind_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_raise_if_not_open_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_repr",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_adds_callback",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_enters_blocking_and_adds_on_synchronous_complete",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_not_blocking_and_no_on_synchronous_complete_when_no_replies",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_type_error_with_invalid_callback",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_throws_value_error_with_unacceptable_replies",
"tests/unit/channel_tests.py::ChannelTests::test_rpc_while_blocking_appends_blocked_collection",
"tests/unit/channel_tests.py::ChannelTests::test_send_method",
"tests/unit/channel_tests.py::ChannelTests::test_set_state",
"tests/unit/channel_tests.py::ChannelTests::test_tx_commit_raises_channel_closed",
"tests/unit/channel_tests.py::ChannelTests::test_tx_commit_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_tx_rollback_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_tx_select_rpc_request",
"tests/unit/channel_tests.py::ChannelTests::test_validate_callback_raises_value_error_not_callable",
"tests/unit/connection_tests.py::ConnectionTests::test_add_callbacks",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_close_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_blocked_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_connection_unblocked_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_add_on_open_error_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_blocked_in_a_row_sets_timer_once",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_multiple_unblocked_in_a_row_removes_timer_once",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_on_stream_terminated_removes_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_timeout_terminates_connection",
"tests/unit/connection_tests.py::ConnectionTests::test_blocked_connection_unblocked_removes_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closed_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_closing_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_init_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_protocol_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_start_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_channel_on_tune_connection_raises_connection_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties_default",
"tests/unit/connection_tests.py::ConnectionTests::test_client_properties_override",
"tests/unit/connection_tests.py::ConnectionTests::test_close_calls_on_close_ready_when_no_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_closes_open_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_closes_opening_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_does_not_close_closing_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_close_raises_wrong_state_when_already_closed_or_closing",
"tests/unit/connection_tests.py::ConnectionTests::test_connect_no_adapter_connect_from_constructor_with_external_workflow",
"tests/unit/connection_tests.py::ConnectionTests::test_connection_blocked_sets_timer",
"tests/unit/connection_tests.py::ConnectionTests::test_create_with_blocked_connection_timeout_config",
"tests/unit/connection_tests.py::ConnectionTests::test_deliver_frame_to_channel_with_frame_for_unknown_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_new_conn_should_use_first_channel",
"tests/unit/connection_tests.py::ConnectionTests::test_next_channel_number_returns_lowest_unused",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_last_channel_calls_on_close_ready",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_closing_state_more_channels_no_on_close_ready",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_non_closing_state",
"tests/unit/connection_tests.py::ConnectionTests::test_on_channel_cleanup_with_closing_channels",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_from_broker_passes_correct_exception",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_close_ok",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_start",
"tests/unit/connection_tests.py::ConnectionTests::test_on_connection_tune",
"tests/unit/connection_tests.py::ConnectionTests::test_on_data_available",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_connected",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_cleans_up",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_access_denied_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_auth_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_connection_closed_callback",
"tests/unit/connection_tests.py::ConnectionTests::test_on_stream_terminated_invokes_protocol_on_connection_error_and_closed",
"tests/unit/connection_tests.py::ConnectionTests::test_send_message_updates_frames_sent_and_bytes_sent",
"tests/unit/connection_tests.py::ConnectionTests::test_set_backpressure_multiplier"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,266 | 1,045 | [
"pika/channel.py",
"pika/connection.py"
] |
nipy__nipype-2490 | 88dbce1ce5439440bcc14c9aa46666c40f642152 | 2018-03-07 21:23:46 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py
index 9f228f5c5..21ecbc0ee 100644
--- a/nipype/utils/nipype2boutiques.py
+++ b/nipype/utils/nipype2boutiques.py
@@ -2,7 +2,7 @@
from __future__ import (print_function, division, unicode_literals,
absolute_import)
-from builtins import str, open
+from builtins import str, open, bytes
# This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format.
# Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms.
#
@@ -40,10 +40,12 @@ def generate_boutiques_descriptor(
raise Exception("Undefined module.")
# Retrieves Nipype interface
- if isinstance(module, str):
+ if isinstance(module, (str, bytes)):
import_module(module)
module_name = str(module)
module = sys.modules[module]
+ else:
+ module_name = str(module.__name__)
interface = getattr(module, interface_name)()
inputs = interface.input_spec()
@@ -249,7 +251,7 @@ def create_tempfile():
Creates a temp file and returns its name.
'''
fileTemp = tempfile.NamedTemporaryFile(delete=False)
- fileTemp.write("hello")
+ fileTemp.write(b"hello")
fileTemp.close()
return fileTemp.name
@@ -283,6 +285,8 @@ def must_generate_value(name, type, ignored_template_inputs, spec_info, spec,
# Best guess to detect string restrictions...
if "' or '" in spec_info:
return False
+ if spec.default or spec.default_value():
+ return False
if not ignored_template_inputs:
return True
return not (name in ignored_template_inputs)
| UnboundLocalError: local variable 'module_name' referenced before assignment
### Summary
Discovered for myself `nipypecli` and decided to give it a try while composing cmdline invocation just following the errors it was spitting out at me and stopping when error didn't give a hint what I could have specified incorrectly:
```
$> nipypecli convert boutiques -m nipype.interfaces.ants.registration -i ANTS -o test
Traceback (most recent call last):
File "/usr/bin/nipypecli", line 11, in <module>
load_entry_point('nipype==1.0.1', 'console_scripts', 'nipypecli')()
File "/usr/lib/python2.7/dist-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/dist-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/dist-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/dist-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/nipype/scripts/cli.py", line 254, in boutiques
verbose, ignore_template_numbers)
File "/usr/lib/python2.7/dist-packages/nipype/utils/nipype2boutiques.py", line 56, in generate_boutiques_descriptor
'command-line'] = "nipype_cmd " + module_name + " " + interface_name + " "
UnboundLocalError: local variable 'module_name' referenced before assignment
```
| nipy/nipype | diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py
new file mode 100644
index 000000000..f1d0c46ee
--- /dev/null
+++ b/nipype/utils/tests/test_nipype2boutiques.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from future import standard_library
+standard_library.install_aliases()
+
+from ..nipype2boutiques import generate_boutiques_descriptor
+
+
+def test_generate():
+ generate_boutiques_descriptor(module='nipype.interfaces.ants.registration',
+ interface_name='ANTS',
+ ignored_template_inputs=(),
+ docker_image=None,
+ docker_index=None,
+ verbose=False,
+ ignore_template_numbers=False)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@88dbce1ce5439440bcc14c9aa46666c40f642152#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/utils/tests/test_nipype2boutiques.py::test_generate"
] | [] | [] | [] | Apache License 2.0 | 2,269 | 447 | [
"nipype/utils/nipype2boutiques.py"
] |
|
elastic__rally-430 | 188495b1fdcd8f9e36625ca2b10f1c9659750290 | 2018-03-08 13:23:20 | a5408e0d0d07b271b509df8057a7c73303604c10 | diff --git a/esrally/config.py b/esrally/config.py
index a78ebf77..b8e0d79d 100644
--- a/esrally/config.py
+++ b/esrally/config.py
@@ -447,6 +447,7 @@ class ConfigFactory:
# the Elasticsearch directory is just the last path component (relative to the source root directory)
config["source"]["elasticsearch.src.subdir"] = io.basename(source_dir)
+ if gradle_bin:
config["build"] = {}
config["build"]["gradle.bin"] = gradle_bin
diff --git a/esrally/track/loader.py b/esrally/track/loader.py
index 513b1e4f..d04de55e 100644
--- a/esrally/track/loader.py
+++ b/esrally/track/loader.py
@@ -757,9 +757,10 @@ class TrackSpecificationReader:
Creates a track instances based on its parsed JSON description.
"""
- def __init__(self, override_auto_manage_indices=None, source=io.FileSource):
+ def __init__(self, override_auto_manage_indices=None, track_params=None, source=io.FileSource):
self.name = None
self.override_auto_manage_indices = override_auto_manage_indices
+ self.track_params = track_params if track_params else {}
self.source = source
self.index_op_type_warning_issued = False
@@ -770,7 +771,7 @@ class TrackSpecificationReader:
meta_data = self._r(track_specification, "meta", mandatory=False)
indices = [self._create_index(idx, mapping_dir)
for idx in self._r(track_specification, "indices", mandatory=False, default_value=[])]
- templates = [self._create_template(tpl, mapping_dir)
+ templates = [self._create_index_template(tpl, mapping_dir)
for tpl in self._r(track_specification, "templates", mandatory=False, default_value=[])]
corpora = self._create_corpora(self._r(track_specification, "corpora", mandatory=False, default_value=[]), indices)
# TODO: Remove this in Rally 0.10.0
@@ -816,7 +817,7 @@ class TrackSpecificationReader:
body_file = self._r(index_spec, "body", mandatory=False)
if body_file:
with self.source(os.path.join(mapping_dir, body_file), "rt") as f:
- body = json.load(f)
+ body = self._load_template(f.read(), index_name)
else:
body = None
@@ -832,15 +833,25 @@ class TrackSpecificationReader:
return track.Index(name=index_name, body=body, auto_managed=auto_managed, types=types)
- def _create_template(self, tpl_spec, mapping_dir):
+ def _create_index_template(self, tpl_spec, mapping_dir):
name = self._r(tpl_spec, "name")
index_pattern = self._r(tpl_spec, "index-pattern")
delete_matching_indices = self._r(tpl_spec, "delete-matching-indices", mandatory=False, default_value=True)
template_file = os.path.join(mapping_dir, self._r(tpl_spec, "template"))
with self.source(template_file, "rt") as f:
- template_content = json.load(f)
+ template_content = self._load_template(f.read(), name)
return track.IndexTemplate(name, index_pattern, template_content, delete_matching_indices)
+ def _load_template(self, contents, description):
+ try:
+ rendered = render_template(loader=jinja2.DictLoader({"default": contents}),
+ template_name="default",
+ template_vars=self.track_params)
+ return json.loads(rendered)
+ except (json.JSONDecodeError, jinja2.exceptions.TemplateError) as e:
+ logger.exception("Could not load file template for %s." % description)
+ raise TrackSyntaxError("Could not load file template for '%s'" % description, str(e))
+
def _create_corpora(self, corpora_specs, indices):
document_corpora = []
known_corpora_names = set()
| Allow to use track parameters also in index / template definitions
Currently index definitions are read as is. However, it can be useful to allow track parameters also for index definition files. For consistency, we should allow the same for index templates.
Technically, this means that we will treat index definition files also as Jinja templates. | elastic/rally | diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py
index 4f9e524e..ea6b4996 100644
--- a/tests/track/loader_test.py
+++ b/tests/track/loader_test.py
@@ -1426,8 +1426,20 @@ class TrackSpecificationReaderTests(TestCase):
}
]
}
- reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
- "/mappings/body.json": ['{"mappings": {"main": "empty-for-test", "secondary": "empty-for-test"}}']
+ reader = loader.TrackSpecificationReader(
+ track_params={"number_of_shards": 3},
+ source=io.DictStringFileSourceFactory({
+ "/mappings/body.json": ["""
+ {
+ "settings": {
+ "number_of_shards": {{ number_of_shards }}
+ },
+ "mappings": {
+ "main": "empty-for-test",
+ "secondary": "empty-for-test"
+ }
+ }
+ """]
}))
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
@@ -1435,7 +1447,16 @@ class TrackSpecificationReaderTests(TestCase):
# indices
self.assertEqual(1, len(resulting_track.indices))
self.assertEqual("index-historical", resulting_track.indices[0].name)
- self.assertDictEqual({"mappings": {"main": "empty-for-test", "secondary": "empty-for-test"}}, resulting_track.indices[0].body)
+ self.assertDictEqual({
+ "settings": {
+ "number_of_shards": 3
+ },
+ "mappings":
+ {
+ "main": "empty-for-test",
+ "secondary": "empty-for-test"
+ }
+ }, resulting_track.indices[0].body)
self.assertEqual(2, len(resulting_track.indices[0].types))
self.assertEqual("main", resulting_track.indices[0].types[0].name)
self.assertEqual("secondary", resulting_track.indices[0].types[1].name)
@@ -1492,8 +1513,17 @@ class TrackSpecificationReaderTests(TestCase):
"operations": [],
"challenges": []
}
- reader = loader.TrackSpecificationReader(source=io.DictStringFileSourceFactory({
- "/mappings/default-template.json": ['{"some-index-template": "empty-for-test"}'],
+ reader = loader.TrackSpecificationReader(
+ track_params={"index_pattern": "*"},
+ source=io.DictStringFileSourceFactory({
+ "/mappings/default-template.json": ["""
+ {
+ "index_patterns": [ "{{index_pattern}}"],
+ "settings": {
+ "number_of_shards": {{ number_of_shards | default(1) }}
+ }
+ }
+ """],
}))
resulting_track = reader("unittest", track_specification, "/mappings")
self.assertEqual("unittest", resulting_track.name)
@@ -1502,7 +1532,13 @@ class TrackSpecificationReaderTests(TestCase):
self.assertEqual(1, len(resulting_track.templates))
self.assertEqual("my-index-template", resulting_track.templates[0].name)
self.assertEqual("*", resulting_track.templates[0].pattern)
- self.assertEqual({"some-index-template": "empty-for-test"}, resulting_track.templates[0].content)
+ self.assertDictEqual(
+ {
+ "index_patterns": ["*"],
+ "settings": {
+ "number_of_shards": 1
+ }
+ }, resulting_track.templates[0].content)
self.assertEqual(0, len(resulting_track.challenges))
def test_types_are_optional_for_user_managed_indices(self):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.9 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-benchmark"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc python3-pip python3-dev"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
elasticsearch==6.0.0
-e git+https://github.com/elastic/rally.git@188495b1fdcd8f9e36625ca2b10f1c9659750290#egg=esrally
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==2.9.5
jsonschema==2.5.1
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==5.4.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==3.2.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
tabulate==0.8.1
thespian==3.9.2
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.22
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: rally
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- elasticsearch==6.0.0
- jinja2==2.9.5
- jsonschema==2.5.1
- markupsafe==2.0.1
- psutil==5.4.0
- py-cpuinfo==3.2.0
- pytest-benchmark==3.4.1
- tabulate==0.8.1
- thespian==3.9.2
- urllib3==1.22
prefix: /opt/conda/envs/rally
| [
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_valid_track_specification_with_index_template"
] | [] | [
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_directory_without_track",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_file_but_not_json",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_named_pipe",
"tests/track/loader_test.py::SimpleTrackRepositoryTests::test_track_from_non_existing_path",
"tests/track/loader_test.py::GitRepositoryTests::test_track_from_existing_repo",
"tests/track/loader_test.py::TrackPreparationTests::test_decompresses_if_archive_available",
"tests/track/loader_test.py::TrackPreparationTests::test_does_nothing_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_archive_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_download_document_file_if_no_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_decompresses_compressed_docs",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_does_nothing_if_no_document_files",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_error_compressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_if_document_file_available",
"tests/track/loader_test.py::TrackPreparationTests::test_prepare_bundled_document_set_uncompressed_docs_wrong_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_file_missing",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_no_url_provided_and_wrong_file_size",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_if_offline",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_no_test_mode_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_download_error_on_connection_problems",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_if_compressed_does_not_contain_expected_document_file",
"tests/track/loader_test.py::TrackPreparationTests::test_raise_error_on_wrong_uncompressed_file_size",
"tests/track/loader_test.py::TemplateRenderTests::test_render_simple_template",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_external_variables",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_globbing",
"tests/track/loader_test.py::TemplateRenderTests::test_render_template_with_variables",
"tests/track/loader_test.py::TrackPostProcessingTests::test_creates_index_auto_management_operations",
"tests/track/loader_test.py::TrackPostProcessingTests::test_post_processes_track_spec",
"tests/track/loader_test.py::TrackPathTests::test_sets_absolute_path",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_empty_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_create_filters_from_mixed_included_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_filters_tasks",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_invalid_syntax",
"tests/track/loader_test.py::TrackFilterTests::test_rejects_unknown_filter_type",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_at_least_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_can_read_track_info",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_description_is_optional",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_document_count_mandatory_if_file_present",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_exactly_one_default_challenge",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_inline_operations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_not_more_than_one_default_challenge_possible",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_multiple_tasks_match",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_completed_by_set_no_task_matches",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_clients_does_not_propagate",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parallel_tasks_with_default_values",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_challenge_and_challenges_are_defined",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_explicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_duplicate_implicit_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_missing_challenge_or_challenges",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_unique_task_names",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_iterations_and_measurement",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_parse_with_mixed_warmup_time_period_and_iterations",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_selects_sole_challenge_implicitly_as_default",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_interval",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_supports_target_throughput",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_types_are_optional_for_user_managed_indices",
"tests/track/loader_test.py::TrackSpecificationReaderTests::test_unique_challenge_names"
] | [] | Apache License 2.0 | 2,275 | 934 | [
"esrally/config.py",
"esrally/track/loader.py"
] |
|
pypa__setuptools_scm-220 | 632b02b8ad1f10c79d36c03502069d74c9686c73 | 2018-03-08 19:18:48 | 0373c11d2c8968a857ff06c94f101abebf825507 | avirshup: Here's my stab at a fix for #219. Please let me know if this seems reasonable and/or if there are any changes that should be made. | diff --git a/setuptools_scm/hg.py b/setuptools_scm/hg.py
index 9322fb3..0ba1774 100644
--- a/setuptools_scm/hg.py
+++ b/setuptools_scm/hg.py
@@ -8,8 +8,14 @@ FILES_COMMAND = 'hg locate -I .'
def _hg_tagdist_normalize_tagcommit(root, tag, dist, node):
dirty = node.endswith('+')
node = 'h' + node.strip('+')
- revset = ("(branch(.) and tag({tag!r})::. and file('re:^(?!\.hgtags).*$')"
- " - tag({tag!r}))").format(tag=tag)
+
+ # Detect changes since the specified tag
+ revset = ("(branch(.)" # look for revisions in this branch only
+ " and tag({tag!r})::." # after the last tag
+ # ignore commits that only modify .hgtags and nothing else:
+ " and (merge() or file('re:^(?!\.hgtags).*$'))"
+ " and not tag({tag!r}))" # ignore the tagged commit itself
+ ).format(tag=tag)
if tag != '0.0':
commits = do(['hg', 'log', '-r', revset, '--template', '{node|short}'],
root)
| Regression: hg repos with setuptools_scm>=1.15 are missing dev version bumps
`setuptools_scm` 1.15.* is reporting incorrect version numbers for my hg repositories.
Specifically, a merge commit that follows a tag commit does not seem to trigger the version bump like I would expect. In the screenshot below, for instance, I would expect the tip to be version `1.1.dev[N]+[sha]`. However, versions 1.15.* report the tip's version as `1.0`.
To reproduce:
```bash
mkdir hgmergetest && cd hgmergetest && hg init
# create initial commit and tag it
touch a && hg add a && hg commit -m "initial commit"
hg tag 1.0
# create a branch
hg branch branch1
touch b && hg add b && hg commit -m "create branch1"
# merge the branch into default
hg update default && hg merge branch1 && hg commit -m "merge branch1 into default"
```
Expected behavior (setuptools_scm 1.13 and 1.14):
```bash
> pip install "setuptools_scm<1.15"
> python -m setuptools_scm
Guessed Version 1.1.dev3+n8dce1535e70a
```
Behavior with `1.15.*`:
```bash
> pip install setuptools_scm==1.15.7
> python -m setuptools_scm
Guessed Version 1.0
```
 | pypa/setuptools_scm | diff --git a/testing/test_mercurial.py b/testing/test_mercurial.py
index 1fe6841..1d91444 100644
--- a/testing/test_mercurial.py
+++ b/testing/test_mercurial.py
@@ -108,3 +108,47 @@ def test_version_in_merge(wd):
def test_parse_no_worktree(tmpdir):
ret = parse(str(tmpdir))
assert ret is None
+
+
[email protected]
+def version_1_0(wd):
+ wd('hg branch default')
+ wd.commit_testfile()
+ wd('hg tag 1.0 -u test -d "0 0"')
+ return wd
+
+
[email protected]
+def pre_merge_commit_after_tag(wd, version_1_0):
+ wd('hg branch testbranch')
+ wd.write('branchfile', 'branchtext')
+ wd(wd.add_command)
+ wd.commit()
+ wd('hg update default')
+ wd('hg merge testbranch')
+ return wd
+
+
[email protected]("pre_merge_commit_after_tag")
+def test_version_bump_before_merge_commit(wd):
+ assert wd.version.startswith('1.1.dev1+')
+
+
[email protected](219)
[email protected]("pre_merge_commit_after_tag")
+def test_version_bump_from_merge_commit(wd):
+ wd.commit()
+ assert wd.version.startswith('1.1.dev3+') # issue 219
+
+
[email protected]("version_1_0")
+def test_version_bump_from_commit_including_hgtag_mods(wd):
+ """ Test the case where a commit includes changes to .hgtags and other files
+ """
+ with wd.cwd.join('.hgtags').open('a') as tagfile:
+ tagfile.write('0 0\n')
+ wd.write('branchfile', 'branchtext')
+ wd(wd.add_command)
+ assert wd.version.startswith('1.1.dev1+') # bump from dirty version
+ wd.commit() # commits both the testfile _and_ .hgtags
+ assert wd.version.startswith('1.1.dev2+')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.15 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/pypa/setuptools_scm.git@632b02b8ad1f10c79d36c03502069d74c9686c73#egg=setuptools_scm
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: setuptools_scm
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/setuptools_scm
| [
"testing/test_mercurial.py::test_version_bump_from_merge_commit"
] | [
"testing/test_mercurial.py::test_archival_to_version[1.2.2.dev0-data4]"
] | [
"testing/test_mercurial.py::test_archival_to_version[0.0-data0]",
"testing/test_mercurial.py::test_archival_to_version[1.0-data1]",
"testing/test_mercurial.py::test_archival_to_version[1.1.dev3+h000000000000-data2]",
"testing/test_mercurial.py::test_archival_to_version[1.2.2-data3]",
"testing/test_mercurial.py::test_find_files_stop_at_root_hg",
"testing/test_mercurial.py::test_version_from_hg_id",
"testing/test_mercurial.py::test_version_from_archival",
"testing/test_mercurial.py::test_version_in_merge",
"testing/test_mercurial.py::test_parse_no_worktree",
"testing/test_mercurial.py::test_version_bump_before_merge_commit",
"testing/test_mercurial.py::test_version_bump_from_commit_including_hgtag_mods"
] | [] | MIT License | 2,276 | 318 | [
"setuptools_scm/hg.py"
] |
tableau__server-client-python-274 | 86e463810be80c2b562845f7c14b775d604f2a86 | 2018-03-09 02:04:54 | 59bf8920730e9877675c31885d538748e7e36bfe | shinchris: :rocket: | diff --git a/samples/download_view_image.py b/samples/download_view_image.py
index 2da2320..b95a862 100644
--- a/samples/download_view_image.py
+++ b/samples/download_view_image.py
@@ -43,7 +43,7 @@ def main():
tableau_auth = TSC.TableauAuth(args.username, password, site_id=site_id)
server = TSC.Server(args.server)
# The new endpoint was introduced in Version 2.5
- server.version = 2.5
+ server.version = "2.5"
with server.auth.sign_in(tableau_auth):
# Step 2: Query for the view that we want an image of
diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py
index deaa94a..e78b2e0 100644
--- a/tableauserverclient/server/endpoint/endpoint.py
+++ b/tableauserverclient/server/endpoint/endpoint.py
@@ -27,6 +27,17 @@ class Endpoint(object):
return headers
+ @staticmethod
+ def _safe_to_log(server_response):
+ '''Checks if the server_response content is not xml (eg binary image or zip)
+ and and replaces it with a constant
+ '''
+ ALLOWED_CONTENT_TYPES = ('application/xml',)
+ if server_response.headers.get('Content-Type', None) not in ALLOWED_CONTENT_TYPES:
+ return '[Truncated File Contents]'
+ else:
+ return server_response.content
+
def _make_request(self, method, url, content=None, request_object=None,
auth_token=None, content_type=None, parameters=None):
if request_object is not None:
@@ -50,7 +61,7 @@ class Endpoint(object):
return server_response
def _check_status(self, server_response):
- logger.debug(server_response.content)
+ logger.debug(self._safe_to_log(server_response))
if server_response.status_code not in Success_codes:
raise ServerResponseError.from_response(server_response.content, self.parent_srv.namespace)
| This log line is overly chatty
https://github.com/tableau/server-client-python/blob/608aa7694d0560ea3c8c37b10127b11207e56e8d/tableauserverclient/server/endpoint/endpoint.py#L53
When using server client python to download workbooks or data sources and you've got log_level=Debug, this log line ends up blowing up your logs. It outputs the hexadecimal representation of the entire file you're downloading, which is not very helpful and explodes your log size. Can we remove this line, or only log out the response contents when you're not using the endpoint to download a file? | tableau/server-client-python | diff --git a/test/test_regression_tests.py b/test/test_regression_tests.py
index 95bdcea..8958c3c 100644
--- a/test/test_regression_tests.py
+++ b/test/test_regression_tests.py
@@ -1,8 +1,23 @@
import unittest
import tableauserverclient.server.request_factory as factory
+from tableauserverclient.server.endpoint import Endpoint
class BugFix257(unittest.TestCase):
def test_empty_request_works(self):
result = factory.EmptyRequest().empty_req()
self.assertEqual(b'<tsRequest />', result)
+
+
+class BugFix273(unittest.TestCase):
+ def test_binary_log_truncated(self):
+
+ class FakeResponse(object):
+
+ headers = {'Content-Type': 'application/octet-stream'}
+ content = b'\x1337' * 1000
+ status_code = 200
+
+ server_response = FakeResponse()
+
+ self.assertEqual(Endpoint._safe_to_log(server_response), '[Truncated File Contents]')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
requests==2.27.1
-e git+https://github.com/tableau/server-client-python.git@86e463810be80c2b562845f7c14b775d604f2a86#egg=tableauserverclient
urllib3==1.26.20
| name: server-client-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- charset-normalizer==2.0.12
- idna==3.10
- requests==2.27.1
- urllib3==1.26.20
prefix: /opt/conda/envs/server-client-python
| [
"test/test_regression_tests.py::BugFix273::test_binary_log_truncated"
] | [] | [
"test/test_regression_tests.py::BugFix257::test_empty_request_works"
] | [] | MIT License | 2,277 | 482 | [
"samples/download_view_image.py",
"tableauserverclient/server/endpoint/endpoint.py"
] |
pydicom__pydicom-595 | 06e2352e8dc5e3ad0801ffa52339b469ae2b7b10 | 2018-03-10 20:24:57 | fcc63f0b96fb370b0eb60b2c765b469ce62e597c | darcymason: I had a quick look at this and it looked fine, but then I tried commenting out the 'continue' lines to make the test fail, and got some strange warnings from values.py about unexpected length.
I've been travelling and haven't had a chance to try again or investigate further. Will look into it when I can, or perhaps someone else can give it a try -- I may have set something up incorrectly.
mrbean-bremen: Hm, if I do the same (e.g. replace `continue` with `pass`) I get:
```
def test_write_removes_grouplength(self):
ds = dcmread(color_pl_name)
assert 0x00080000 in ds
ds.save_as(self.file_out, write_like_original=True)
self.file_out.seek(0)
ds = dcmread(self.file_out)
# group length has been removed
> assert 0x00080000 not in ds
E AssertionError: assert 524288 not in (0008, 0000) Group Length UL: 480\n(0008, 0008) Image Type CS: ['ORIGIN...Group Length UL: 92172\n(7fe0, 0010) Pixel Data OW: Array of 92160 bytes
```
which looks ok to me.
darcymason: Okay, I finally cracked this - it had nothing to do with this latest code, but I couldn't leave the mystery. Well, it is still a little bit of a mystery.
It went away if I removed the other classes from test_filewriter.py. The second clue was that the error and warnings appeared twice as often as expected. Finally I noticed there were coming both from class WriteFileTests and from ScratchWriteDateTimeTests. Then finally noticed that the latter is *derived* from the former. Changed that to subclass from unittest.TestCase, and my issue went away, and the tests run normally with the `continue` line put back also. So I haven't tried to figure out exactly why it causes trouble in both classes when the one is derived from the other, but it certainly is the source. I suspect it is something to do with the temp file not being reset, and maybe the order of events depends on platform (I was testing on Windows, python 3.6.4).
So... @mrbean-bremen, if you don't mind updating that one line, then I'd be happy to merge this.
scaramallion: It looks like the change was made so that the `WriteFileTests` tests are run again but with `config.datetime_conversion = True`
darcymason: Yes, I think you are right, I remember that now that you bring it up. But somehow that is not working out when the new test fails. Well, it shouldn't fail, of course, so maybe it is a moot point, but it does seem the code is a bit fragile somehow. Personally I like the 'test first' philosophy where you write the new unit test (so that it fails) before fixing the main code.
darcymason: ... I'll merge this and add a separate issue for the test code problem. | diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 01d9a4911..e283da86f 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -458,6 +458,9 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
tags = sorted(dataset.keys())
for tag in tags:
+ # do not write retired Group Length (see PS3.5, 7.2)
+ if tag.element == 0 and tag.group > 6:
+ continue
with tag_in_exception(tag):
# write_data_element(fp, dataset.get_item(tag), dataset_encoding)
# XXX for writing raw tags without converting to DataElement
| Update filewriter group length fields
_From [[email protected]](https://code.google.com/u/[email protected]/) on December 10, 2008 22:48:30_
It looks like filewriter module does not recalc group length fields except
for file meta info section. Should make this the case for all groups.
_Original issue: http://code.google.com/p/pydicom/issues/detail?id=30_
| pydicom/pydicom | diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index f2bac9a81..362704140 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -39,6 +39,7 @@ ct_name = get_testdata_files("CT_small.dcm")[0]
mr_name = get_testdata_files("MR_small.dcm")[0]
jpeg_name = get_testdata_files("JPEG2000.dcm")[0]
no_ts = get_testdata_files("meta_missing_tsyntax.dcm")[0]
+color_pl_name = get_testdata_files("color-pl.dcm")[0]
datetime_name = mr_name
unicode_name = get_charset_files("chrH31.dcm")[0]
@@ -194,6 +195,15 @@ class WriteFileTests(unittest.TestCase):
ds = dcmread(fp, force=True)
assert ds[0xFFFFFFFF].value == b'123456'
+ def test_write_removes_grouplength(self):
+ ds = dcmread(color_pl_name)
+ assert 0x00080000 in ds
+ ds.save_as(self.file_out, write_like_original=True)
+ self.file_out.seek(0)
+ ds = dcmread(self.file_out)
+ # group length has been removed
+ assert 0x00080000 not in ds
+
class ScratchWriteDateTimeTests(WriteFileTests):
"""Write and reread simple or multi-value DA/DT/TM data elements"""
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@06e2352e8dc5e3ad0801ffa52339b469ae2b7b10#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_removes_grouplength",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_removes_grouplength"
] | [] | [
"pydicom/tests/test_filewriter.py::WriteFileTests::testCT",
"pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMR",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan",
"pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_ffff_ffff",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testCT",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMR",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMultiPN",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTDose",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTPlan",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testUnicode",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_multivalue_DA",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_ffff_ffff",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UN_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_empty_LO",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_unknown_vr_raises",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_pixel_data_not_ow_or_ob",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises",
"pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_empty_value",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_list",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_singleton",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_exception",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_big_endian",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding_unicode",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding",
"pydicom/tests/test_filewriter.py::TestWriteDT::test_format_dt",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_incorrect_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_incorrect_data"
] | [] | MIT License | 2,285 | 179 | [
"pydicom/filewriter.py"
] |
awslabs__aws-cfn-template-flip-43 | 168476fed202b08221f163de22adb9cb859d937e | 2018-03-12 14:21:52 | 168476fed202b08221f163de22adb9cb859d937e | diff --git a/cfn_flip/yaml_dumper.py b/cfn_flip/yaml_dumper.py
index 85b287d..2a3a764 100644
--- a/cfn_flip/yaml_dumper.py
+++ b/cfn_flip/yaml_dumper.py
@@ -15,7 +15,9 @@ See the License for the specific language governing permissions and limitations
from cfn_clean.yaml_dumper import CleanCfnYamlDumper
from cfn_tools.odict import ODict
from cfn_tools.yaml_dumper import CfnYamlDumper
+import six
+TAG_STR = "tag:yaml.org,2002:str"
TAG_MAP = "tag:yaml.org,2002:map"
CONVERTED_SUFFIXES = ["Ref", "Condition"]
@@ -46,6 +48,13 @@ class LongCleanDumper(CleanCfnYamlDumper):
"""
+def string_representer(dumper, value):
+ if value.startswith("0"):
+ return dumper.represent_scalar(TAG_STR, value, style="'")
+
+ return dumper.represent_scalar(TAG_STR, value)
+
+
def fn_representer(dumper, fn_name, value):
tag = "!{}".format(fn_name)
@@ -82,6 +91,7 @@ def map_representer(dumper, value):
# Customise our dumpers
Dumper.add_representer(ODict, map_representer)
+Dumper.add_representer(six.text_type, string_representer)
CleanDumper.add_representer(ODict, map_representer)
| Inconsistent conversion of strings from json to yaml
I am converting a document from json to yaml as part of a CloudFormation Template, and am noticing an odd error where some Id's that are marked as strings are being converted to strings, and other times not.
Here's a json snippet I'm working with right now which are the mappings for some of the Generic Elastic Load Balancer ID's for AWS:
``` "Mappings": {
"Regions": {
"us-east-1": {
"ELBID": "127311923021",
"Name": "ue1"
},
"us-east-2": {
"ELBID": "033677994240",
"Name": "ue2"
},
"us-west-1": {
"ELBID": "027434742980",
"Name": "uw1"
},
"us-west-2": {
"ELBID": "797873946194",
"Name": "uw2"
}
}
}
```
And This is the resulting yaml I'm getting after calling to_yaml:
```
Mappings:
Regions:
us-east-1:
ELBID: '127311923021'
Name: ue1
us-east-2:
ELBID: 033677994240
Name: ue2
us-west-1:
ELBID: 027434742980
Name: uw1
us-west-2:
ELBID: '797873946194'
Name: uw2
```
Strangely enough, any number beginning with 0 is converted, but the ones beginning with other numbers do not. I'm not sure what the expected behavior should be in this case, (either fully converted or not) but having it half and half is inconsistent, and I would believe is a bug.
Currently I'm having errors with using this yaml with sceptre/CloudFormation due to some of the Elastic Load Balancer ID's not being strings. | awslabs/aws-cfn-template-flip | diff --git a/tests/test_flip.py b/tests/test_flip.py
index c479a20..5ac0cee 100644
--- a/tests/test_flip.py
+++ b/tests/test_flip.py
@@ -502,5 +502,39 @@ def test_get_dumper():
When invoking get_dumper use clean_up & long_form
:return: LongCleanDumper
"""
+
resp = cfn_flip.get_dumper(clean_up=True, long_form=True)
assert resp == cfn_flip.yaml_dumper.LongCleanDumper
+
+
+def test_quoted_digits():
+ """
+ Any value that is composed entirely of digits
+ should be quoted for safety.
+ CloudFormation is happy for numbers to appear as strings.
+ But the opposite (e.g. account numbers as numbers) can cause issues
+ See https://github.com/awslabs/aws-cfn-template-flip/issues/41
+ """
+
+ value = dump_json(ODict((
+ ("int", 123456),
+ ("float", 123.456),
+ ("oct", "0123456"),
+ ("bad-oct", "012345678"),
+ ("safe-oct", "0o123456"),
+ ("string", "abcdef"),
+ )))
+
+ expected = "\n".join((
+ "int: 123456",
+ "float: 123.456",
+ "oct: '0123456'",
+ "bad-oct: '012345678'",
+ "safe-oct: '0o123456'",
+ "string: abcdef",
+ ""
+ ))
+
+ actual = cfn_flip.to_yaml(value)
+
+ assert actual == expected
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-sugar"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/awslabs/aws-cfn-template-flip.git@168476fed202b08221f163de22adb9cb859d937e#egg=cfn_flip
click==8.0.4
coverage==6.2
distlib==0.3.9
filelock==3.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-sugar==0.9.6
PyYAML==6.0.1
six==1.17.0
termcolor==1.1.0
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
virtualenv==20.17.1
zipp==3.6.0
| name: aws-cfn-template-flip
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- click==8.0.4
- coverage==6.2
- distlib==0.3.9
- filelock==3.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-sugar==0.9.6
- pyyaml==6.0.1
- six==1.17.0
- termcolor==1.1.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- virtualenv==20.17.1
- zipp==3.6.0
prefix: /opt/conda/envs/aws-cfn-template-flip
| [
"tests/test_flip.py::test_quoted_digits"
] | [
"tests/test_flip.py::test_to_yaml_with_yaml",
"tests/test_flip.py::test_flip_with_bad_data",
"tests/test_flip.py::test_explicit_json_rejects_yaml",
"tests/test_flip.py::test_explicit_yaml_rejects_bad_yaml"
] | [
"tests/test_flip.py::test_flip_to_json_with_datetimes",
"tests/test_flip.py::test_flip_to_yaml_with_clean_getatt",
"tests/test_flip.py::test_flip_to_yaml_with_multi_level_getatt",
"tests/test_flip.py::test_flip_to_yaml_with_dotted_getatt",
"tests/test_flip.py::test_flip_to_json_with_multi_level_getatt",
"tests/test_flip.py::test_getatt_from_yaml",
"tests/test_flip.py::test_flip_to_json_with_condition",
"tests/test_flip.py::test_flip_to_yaml_with_newlines",
"tests/test_flip.py::test_clean_flip_to_yaml_with_newlines",
"tests/test_flip.py::test_unconverted_types",
"tests/test_flip.py::test_get_dumper"
] | [] | Apache License 2.0 | 2,289 | 365 | [
"cfn_flip/yaml_dumper.py"
] |
|
datosgobar__pydatajson-131 | ef99387305d7cd46831c715c7a443f4b056baeb4 | 2018-03-16 17:55:35 | adb85a7de7dfa073ddf9817a5fe2d125f9ce4e54 | diff --git a/pydatajson/ckan_utils.py b/pydatajson/ckan_utils.py
index f71570d..9724f44 100644
--- a/pydatajson/ckan_utils.py
+++ b/pydatajson/ckan_utils.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
+import re
+import sys
from datetime import time
from dateutil import parser, tz
from .helpers import title_to_name
@@ -14,7 +16,7 @@ def append_attribute_to_extra(package, dataset, attribute, serialize=False):
package['extras'].append({'key': attribute, 'value': value})
-def map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id=None,
+def map_dataset_to_package(catalog, dataset, owner_org, catalog_id=None,
demote_superThemes=True, demote_themes=True):
package = dict()
package['extras'] = []
@@ -66,7 +68,8 @@ def map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id=None,
if themes and demote_themes:
package['tags'] = package.get('tags', [])
for theme in themes:
- label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
+ label = catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE)
package['tags'].append({'name': label})
else:
package['groups'] = package.get('groups', []) + [{'name': title_to_name(theme, decode=False)}
diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 2c31623..9573040 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -29,9 +29,8 @@ def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier, portal_u
"""
dataset = catalog.get_dataset(dataset_origin_identifier)
ckan_portal = RemoteCKAN(portal_url, apikey=apikey)
- theme_taxonomy = catalog.themes
- package = map_dataset_to_package(dataset, owner_org, theme_taxonomy, catalog_id,
+ package = map_dataset_to_package(catalog, dataset, owner_org, catalog_id,
demote_superThemes, demote_themes)
# Get license id
diff --git a/pydatajson/search.py b/pydatajson/search.py
index 0ab6391..1c3d25a 100644
--- a/pydatajson/search.py
+++ b/pydatajson/search.py
@@ -280,7 +280,7 @@ def get_theme(catalog, identifier=None, label=None):
# filtra por id (preferentemente) o label
if identifier:
- filtered_themes = [theme for theme in themes if theme["id"] == identifier]
+ filtered_themes = [theme for theme in themes if theme["id"].lower() == identifier.lower()]
if len(filtered_themes) > 1:
raise ThemeIdRepeated([x["id"] for x in filtered_themes])
| Robustecer el manejo de themes por parte de push_dataset_to_ckan()
Este es un issue que hace referencia al 24 de monitoreo apertura:
https://github.com/datosgobar/monitoreo-apertura/issues/24
Lo abro acá porque me parece que es más apropiado discutirlo en este repo.
--------------------------------------------------------------------------------------------------
**Contexto**
Se intentó federar el dataset de series de tiempo a la instancia andino.datos.gob.ar, de la siguiente manera:
```
catalog_modernizacion = DataJson("http://infra.datos.gob.ar/catalog/modernizacion/data.json")
catalog_modernizacion.push_dataset_to_ckan(
"modernizacion", "ministerio-de-modernizacion", "7",
portal_url, apikey
)
```
y dio la rta:
```
---------------------------------------------------------------------------
StopIteration Traceback (most recent call last)
<ipython-input-8-b4c2dae3c9ec> in <module>()
1 catalog_modernizacion.push_dataset_to_ckan(
2 "modernizacion", "ministerio-de-modernizacion", "7",
----> 3 portal_url, apikey
4 )
/Users/abenassi/github/pydatajson/pydatajson/federation.pyc in push_dataset_to_ckan(catalog, catalog_id, owner_org, dataset_origin_identifier, portal_url, apikey)
49 theme_taxonomy = catalog.themes
50 for theme in themes:
---> 51 label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
52 package['tags'].append({'name': label})
53
StopIteration:
```
**Propuesta**
Investigar qué hizo que se frenara la operación. Puede ser que el problema sea que se requiere crear "temas" que la instancia destino no tiene. Si es así, lo mejor sería incorporar la creación automática de temas que no existan dentro de la operación de federación. | datosgobar/pydatajson | diff --git a/tests/samples/full_data.json b/tests/samples/full_data.json
index b349578..e80673c 100644
--- a/tests/samples/full_data.json
+++ b/tests/samples/full_data.json
@@ -193,7 +193,7 @@
"id": "convocatorias"
},
{
- "label": "Compras",
+ "label": "Adquisición",
"description": "Datasets sobre compras realizadas.",
"id": "compras"
},
@@ -213,7 +213,7 @@
"id": "normativa"
},
{
- "label": "Proveedores",
+ "label": "Proveeduría",
"description": "Datasets sobre proveedores del Estado.",
"id": "proveedores"
}
diff --git a/tests/test_ckan_utils.py b/tests/test_ckan_utils.py
index 3f20984..f90406e 100644
--- a/tests/test_ckan_utils.py
+++ b/tests/test_ckan_utils.py
@@ -1,6 +1,10 @@
+# -*- coding: utf-8 -*-
+
import unittest
import os
import json
+import re
+import sys
from dateutil import parser, tz
from .context import pydatajson
from pydatajson.ckan_utils import map_dataset_to_package, map_distributions_to_resources, convert_iso_string_to_utc
@@ -23,15 +27,15 @@ class DatasetConversionTestCase(unittest.TestCase):
cls.distributions = cls.dataset['distribution']
def test_catalog_id_is_prepended_to_dataset_id_if_passed(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
self.assertEqual(self.catalog_id + '_' + self.dataset_id, package['id'])
def test_dataset_id_is_preserved_if_catlog_id_is_not_passed(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner')
self.assertEqual(self.dataset_id, package['id'])
def test_replicated_plain_attributes_are_corrext(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
plain_replicated_attributes = [('title', 'title'),
('notes', 'description'),
('url', 'landingPage')]
@@ -40,7 +44,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual('owner', package['owner_org'])
def test_dataset_nested_replicated_attributes_stay_the_same(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
contact_point_nested = [('maintainer', 'fn'),
('maintainer_email', 'hasEmail')]
for fst, snd in contact_point_nested:
@@ -51,7 +55,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual(self.dataset.get('publisher').get(snd), package.get(fst))
def test_dataset_array_attributes_are_correct(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
groups = [group['name'] for group in package.get('groups', [])]
super_themes = [title_to_name(s_theme.lower()) for s_theme in self.dataset.get('superTheme')]
try:
@@ -65,7 +69,8 @@ class DatasetConversionTestCase(unittest.TestCase):
themes = self.dataset.get('theme', [])
theme_labels = []
for theme in themes:
- label = next(x['label'] for x in self.catalog.themes if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\w .-]+', '', label, flags=re.UNICODE)
theme_labels.append(label)
try:
@@ -74,7 +79,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords + theme_labels, tags)
def test_themes_are_preserved_if_not_demoted(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
catalog_id=self.catalog_id, demote_themes=False)
groups = [group['name'] for group in package.get('groups', [])]
super_themes = [title_to_name(s_theme.lower()) for s_theme in self.dataset.get('superTheme')]
@@ -92,7 +97,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords, tags)
def test_superThemes_dont_impact_groups_if_not_demoted(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
catalog_id=self.catalog_id, demote_superThemes=False)
groups = [group['name'] for group in package.get('groups', [])]
tags = [tag['name'] for tag in package['tags']]
@@ -100,7 +105,8 @@ class DatasetConversionTestCase(unittest.TestCase):
themes = self.dataset.get('theme', [])
theme_labels = []
for theme in themes:
- label = next(x['label'] for x in self.catalog.themes if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\wá-úÁ-ÚñÑ .-]+', '', label, flags=re.UNICODE)
theme_labels.append(label)
try:
self.assertItemsEqual([], groups)
@@ -112,7 +118,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords + theme_labels, tags)
def test_preserve_themes_and_superThemes(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes,
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner',
self.catalog_id, False, False)
groups = [group['name'] for group in package.get('groups', [])]
tags = [tag['name'] for tag in package['tags']]
@@ -128,7 +134,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertCountEqual(keywords, tags)
def test_dataset_extra_attributes_are_correct(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
# extras are included in dataset
if package['extras']:
for extra in package['extras']:
@@ -144,7 +150,7 @@ class DatasetConversionTestCase(unittest.TestCase):
self.assertEqual(dataset_value, extra_value)
def test_dataset_extra_attributes_are_complete(self):
- package = map_dataset_to_package(self.dataset, 'owner', self.catalog.themes, catalog_id=self.catalog_id)
+ package = map_dataset_to_package(self.catalog, self.dataset, 'owner', catalog_id=self.catalog_id)
# dataset attributes are included in extras
extra_attrs = ['issued', 'modified', 'accrualPeriodicity', 'temporal', 'language', 'spatial', 'superTheme']
for key in extra_attrs:
diff --git a/tests/test_federation.py b/tests/test_federation.py
index e4a1d2e..e6804b9 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -1,6 +1,9 @@
+# -*- coding: utf-8 -*-
+
import unittest
import os
import re
+import sys
try:
from mock import patch, MagicMock
except ImportError:
@@ -83,10 +86,10 @@ class PushDatasetTestCase(unittest.TestCase):
@patch('pydatajson.federation.RemoteCKAN', autospec=True)
def test_tags_are_passed_correctly(self, mock_portal):
themes = self.dataset['theme']
- theme_taxonomy = self.catalog.themes
keywords = [kw for kw in self.dataset['keyword']]
for theme in themes:
- label = next(x['label'] for x in theme_taxonomy if x['id'] == theme)
+ label = self.catalog.get_theme(identifier=theme)['label']
+ label = re.sub(r'[^\w .-]+', '', label, flags=re.UNICODE)
keywords.append(label)
def mock_call_action(action, data_dict=None):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 3
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"mkdir tests/temp"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
argh==0.27.2
asn1crypto==1.5.1
attrs==22.2.0
Babel==2.11.0
bumpversion==0.5.3
certifi==2021.5.30
cffi==1.15.1
chardet==3.0.4
ckanapi==4.0
CommonMark==0.5.4
coverage==4.1
cryptography==2.1.4
distlib==0.3.9
docopt==0.6.2
docutils==0.18.1
et-xmlfile==1.1.0
filelock==3.4.1
flake8==2.6.0
idna==2.6
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
isodate==0.6.0
jdcal==1.4.1
Jinja2==3.0.3
jsonschema==2.6.0
MarkupSafe==2.0.1
mccabe==0.5.3
multidict==5.2.0
nose==1.3.7
openpyxl==2.4.11
packaging==21.3
pathtools==0.1.2
pkginfo==1.10.0
platformdirs==2.4.0
pluggy==0.13.1
pockets==0.9.1
py==1.11.0
pycodestyle==2.0.0
pycparser==2.21
-e git+https://github.com/datosgobar/pydatajson.git@ef99387305d7cd46831c715c7a443f4b056baeb4#egg=pydatajson
pyflakes==1.2.3
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.6.1
pytz==2025.2
PyYAML==3.11
recommonmark==0.4.0
requests==2.18.4
requests-toolbelt==1.0.0
rfc3987==1.3.7
six==1.11.0
snowballstemmer==2.2.0
Sphinx==1.5.2
sphinx-rtd-theme==0.2.4
sphinxcontrib-napoleon==0.6.1
tomli==1.2.3
tox==2.9.1
tqdm==4.64.1
twine==1.9.1
typing_extensions==4.1.1
unicodecsv==0.14.1
Unidecode==0.4.21
urllib3==1.22
vcrpy==1.11.1
virtualenv==20.17.1
watchdog==0.8.3
wrapt==1.16.0
yarl==1.7.2
zipp==3.6.0
| name: pydatajson
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- argh==0.27.2
- asn1crypto==1.5.1
- attrs==22.2.0
- babel==2.11.0
- bumpversion==0.5.3
- cffi==1.15.1
- chardet==3.0.4
- ckanapi==4.0
- commonmark==0.5.4
- coverage==4.1
- cryptography==2.1.4
- distlib==0.3.9
- docopt==0.6.2
- docutils==0.18.1
- et-xmlfile==1.1.0
- filelock==3.4.1
- flake8==2.6.0
- idna==2.6
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- isodate==0.6.0
- jdcal==1.4.1
- jinja2==3.0.3
- jsonschema==2.6.0
- markupsafe==2.0.1
- mccabe==0.5.3
- multidict==5.2.0
- nose==1.3.7
- openpyxl==2.4.11
- packaging==21.3
- pathtools==0.1.2
- pkginfo==1.10.0
- platformdirs==2.4.0
- pluggy==0.13.1
- pockets==0.9.1
- py==1.11.0
- pycodestyle==2.0.0
- pycparser==2.21
- pyflakes==1.2.3
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.6.1
- pytz==2025.2
- pyyaml==3.11
- recommonmark==0.4.0
- requests==2.18.4
- requests-toolbelt==1.0.0
- rfc3987==1.3.7
- six==1.11.0
- snowballstemmer==2.2.0
- sphinx==1.5.2
- sphinx-rtd-theme==0.2.4
- sphinxcontrib-napoleon==0.6.1
- tomli==1.2.3
- tox==2.9.1
- tqdm==4.64.1
- twine==1.9.1
- typing-extensions==4.1.1
- unicodecsv==0.14.1
- unidecode==0.04.21
- urllib3==1.22
- vcrpy==1.11.1
- virtualenv==20.17.1
- watchdog==0.8.3
- wrapt==1.16.0
- yarl==1.7.2
- zipp==3.6.0
prefix: /opt/conda/envs/pydatajson
| [
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_catalog_id_is_prepended_to_dataset_id_if_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_array_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_complete",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_extra_attributes_are_correct",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_id_is_preserved_if_catlog_id_is_not_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_dataset_nested_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_preserve_themes_and_superThemes",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_replicated_plain_attributes_are_corrext",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_superThemes_dont_impact_groups_if_not_demoted",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_themes_are_preserved_if_not_demoted"
] | [] | [
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_catalog_id_is_prefixed_in_resource_id_if_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resource_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_extra_attributes_are_created_correctly",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_replicated_attributes_stay_the_same",
"tests/test_ckan_utils.py::DatasetConversionTestCase::test_resources_transformed_attributes_are_correct",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_change_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_dates_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_microseconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_seconds_are_handled_correctly",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_datetimes_without_timezones_stay_the_same",
"tests/test_ckan_utils.py::DatetimeConversionTests::test_timezones_are_handled_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization"
] | [] | MIT License | 2,298 | 746 | [
"pydatajson/ckan_utils.py",
"pydatajson/federation.py",
"pydatajson/search.py"
] |
|
G-Node__python-odml-251 | 8953343f0f4616c0a71087d406b5f6d4a2036748 | 2018-03-16 21:34:58 | eeff5922987b064681d1328f81af317d8171808f | diff --git a/odml/doc.py b/odml/doc.py
index 8d75f2b..ea15912 100644
--- a/odml/doc.py
+++ b/odml/doc.py
@@ -34,10 +34,13 @@ class BaseDocument(base.sectionable, Document):
print(e)
self._id = str(uuid.uuid4())
self._author = author
- self._date = date # date must be a datetime
self._version = version
self._repository = repository
+ # Make sure date is properly parsed into a datetime object
+ self._date = None
+ self.date = date
+
@property
def id(self):
"""
diff --git a/odml/dtypes.py b/odml/dtypes.py
index 39d1e8d..e86bf8d 100644
--- a/odml/dtypes.py
+++ b/odml/dtypes.py
@@ -1,5 +1,5 @@
import sys
-import datetime
+import datetime as dt
from enum import Enum
self = sys.modules[__name__].__dict__
@@ -12,6 +12,10 @@ try:
except NameError:
unicode = str
+FORMAT_DATE = "%Y-%m-%d"
+FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
+FORMAT_TIME = "%H:%M:%S"
+
class DType(str, Enum):
string = 'string'
@@ -44,11 +48,11 @@ def default_values(dtype):
return default_dtype_value[dtype]
if dtype == 'datetime':
- return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ return dt.datetime.now().replace(microsecond=0)
if dtype == 'date':
- return datetime.datetime.now().strftime('%Y-%m-%d')
+ return dt.datetime.now().date()
if dtype == 'time':
- return datetime.datetime.now().strftime('%H:%M:%S')
+ return dt.datetime.now().replace(microsecond=0).time()
return '' # Maybe return None ?
@@ -65,9 +69,9 @@ def infer_dtype(value):
if dtype == 'string' and '\n' in value:
dtype = 'text'
return dtype
- else:
- # If unable to infer a dtype of given value, return defalt as *string*
- return 'string'
+
+ # If unable to infer a dtype of given value, return default as *string*
+ return 'string'
def valid_type(dtype):
@@ -109,14 +113,15 @@ def set(value, dtype=None):
if isinstance(value, str):
return str_set(value)
else:
- if type(value) in (str, unicode):
+ if isinstance(value, (str, unicode)):
return str_set(value)
return self.get(dtype + "_set", str_set)(value)
def int_get(string):
- if not string:
- return 0
+ if string is None or string == "":
+ return default_values("int")
+
try:
return int(string)
except ValueError:
@@ -125,14 +130,20 @@ def int_get(string):
def float_get(string):
- if not string:
- return 0.0
+ if string is None or string == "":
+ return default_values("float")
+
return float(string)
def str_get(string):
+ # Do not stringify empty list or dict but make sure boolean False gets through.
+ if string in [None, "", [], {}]:
+ return default_values("string")
+
if sys.version_info < (3, 0):
return unicode(string)
+
return str(string)
@@ -144,71 +155,65 @@ string_set = str_get
def time_get(string):
- if not string:
- return None
- if type(string) is datetime.time:
- return datetime.datetime.strptime(string.strftime('%H:%M:%S'),
- '%H:%M:%S').time()
- else:
- return datetime.datetime.strptime(string, '%H:%M:%S').time()
+ if string is None or string == "":
+ return default_values("time")
+ if isinstance(string, dt.time):
+ return dt.datetime.strptime(string.strftime(FORMAT_TIME), FORMAT_TIME).time()
-def time_set(value):
- if not value:
- return None
- if type(value) is datetime.time:
- return value.strftime("%H:%M:%S")
- return value.isoformat()
+ return dt.datetime.strptime(string, FORMAT_TIME).time()
+
+
+time_set = time_get
def date_get(string):
- if not string:
- return None
- if type(string) is datetime.date:
- return datetime.datetime.strptime(string.isoformat(),
- '%Y-%m-%d').date()
- else:
- return datetime.datetime.strptime(string, '%Y-%m-%d').date()
+ if string is None or string == "":
+ return default_values("date")
+
+ if isinstance(string, dt.date):
+ return dt.datetime.strptime(string.isoformat(), FORMAT_DATE).date()
+
+ return dt.datetime.strptime(string, FORMAT_DATE).date()
-date_set = time_set
+date_set = date_get
def datetime_get(string):
- if not string:
- return None
- if type(string) is datetime.datetime:
- return datetime.datetime.strptime(string.strftime('%Y-%m-%d %H:%M:%S'),
- '%Y-%m-%d %H:%M:%S')
- else:
- return datetime.datetime.strptime(string, '%Y-%m-%d %H:%M:%S')
+ if string is None or string == "":
+ return default_values("datetime")
+ if isinstance(string, dt.datetime):
+ return dt.datetime.strptime(string.strftime(FORMAT_DATETIME), FORMAT_DATETIME)
-def datetime_set(value):
- if not value:
- return None
- if type(value) is datetime.datetime:
- return value.strftime('%Y-%m-%d %H:%M:%S')
- else:
- return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
+ return dt.datetime.strptime(string, FORMAT_DATETIME)
+
+
+datetime_set = datetime_get
def boolean_get(string):
- if string is None:
- return None
- if type(string) in (unicode, str):
+ if string in [None, "", [], {}]:
+ return default_values("boolean")
+
+ if isinstance(string, (unicode, str)):
string = string.lower()
+
truth = ["true", "1", True, "t"] # be kind, spec only accepts True / False
if string in truth:
return True
+
false = ["false", "0", False, "f"]
if string in false:
return False
+
# disallow any values that cannot be interpreted as boolean.
raise ValueError
# Alias boolean_set to boolean_get. Both perform same function.
+
boolean_set = boolean_get
bool_get = boolean_get
bool_set = boolean_set
diff --git a/odml/tools/dict_parser.py b/odml/tools/dict_parser.py
index 0d2cdf0..3dd95a9 100644
--- a/odml/tools/dict_parser.py
+++ b/odml/tools/dict_parser.py
@@ -83,7 +83,13 @@ class DictWriter:
if isinstance(tag, tuple):
prop_dict[attr] = list(tag)
elif (tag == []) or tag: # Even if 'value' is empty, allow '[]'
- prop_dict[attr] = tag
+ # Custom odML tuples require special handling
+ # for save loading from file.
+ if attr == "value" and prop.dtype and \
+ prop.dtype.endswith("-tuple") and len(prop.value) > 0:
+ prop_dict["value"] = "(%s)" % ";".join(prop.value[0])
+ else:
+ prop_dict[attr] = tag
props_seq.append(prop_dict)
@@ -178,17 +184,13 @@ class DictReader:
for _property in props_list:
prop_attrs = {}
- values = []
for i in _property:
attr = self.is_valid_attribute(i, odmlfmt.Property)
- if attr == 'value':
- values = _property['value']
if attr:
prop_attrs[attr] = _property[attr]
prop = odmlfmt.Property.create(**prop_attrs)
- prop.value = values
odml_props.append(prop)
return odml_props
diff --git a/odml/tools/odmlparser.py b/odml/tools/odmlparser.py
index 1caa7fa..641a52a 100644
--- a/odml/tools/odmlparser.py
+++ b/odml/tools/odmlparser.py
@@ -5,6 +5,7 @@ A generic odML parsing module.
Parses odML files and documents.
"""
+import datetime
import json
import yaml
@@ -67,11 +68,21 @@ class ODMLWriter:
if self.parser == 'YAML':
string_doc = yaml.dump(odml_output, default_flow_style=False)
elif self.parser == 'JSON':
- string_doc = json.dumps(odml_output, indent=4)
+ string_doc = json.dumps(odml_output, indent=4,
+ cls=JSONDateTimeSerializer)
return string_doc
+# Required to serialize datetime values with JSON.
+class JSONDateTimeSerializer(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, (datetime.datetime, datetime.date, datetime.time)):
+ return str(o)
+
+ return json.JSONEncoder.default(self, o)
+
+
class ODMLReader:
"""
A reader to parse odML files or strings into odml documents,
diff --git a/odml/tools/xmlparser.py b/odml/tools/xmlparser.py
index a0d48c1..f2ea862 100644
--- a/odml/tools/xmlparser.py
+++ b/odml/tools/xmlparser.py
@@ -83,7 +83,11 @@ class XMLWriter:
if val is None:
continue
if isinstance(fmt, format.Property.__class__) and k == "value":
- ele = E(k, to_csv(val))
+ # Custom odML tuples require special handling for save loading from file.
+ if e.dtype and e.dtype.endswith("-tuple") and len(val) > 0:
+ ele = E(k, "(%s)" % ";".join(val[0]))
+ else:
+ ele = E(k, to_csv(val))
cur.append(ele)
else:
if isinstance(val, list):
| [dtype] Return defined default values
Currently the "get" methods in `dtype.py` return magic numbers as default values. Make them return the already defined default values instead. | G-Node/python-odml | diff --git a/test/test_dtypes.py b/test/test_dtypes.py
index 6e90e5e..bbc3d35 100644
--- a/test/test_dtypes.py
+++ b/test/test_dtypes.py
@@ -1,8 +1,7 @@
+import datetime
import unittest
import odml.dtypes as typ
-import odml
-import datetime
class TestTypes(unittest.TestCase):
@@ -11,42 +10,150 @@ class TestTypes(unittest.TestCase):
pass
def test_date(self):
+ self.assertIsInstance(typ.date_get(None), datetime.date)
+ self.assertIsInstance(typ.date_get(""), datetime.date)
+
+ re = "^[0-9]{4}-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1])$"
+ self.assertRegexpMatches(typ.date_get(None).strftime(typ.FORMAT_DATE), re)
+ self.assertRegexpMatches(typ.date_get("").strftime(typ.FORMAT_DATE), re)
+
date = datetime.date(2011, 12, 1)
date_string = '2011-12-01'
self.assertEqual(date, typ.date_get(date_string))
- self.assertEqual(typ.date_set(date), date_string)
+ self.assertEqual(date, typ.date_get(date))
+
+ with self.assertRaises(TypeError):
+ _ = typ.date_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.date_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.date_get(False)
+
+ # Test fail on datetime.datetime
+ with self.assertRaises(ValueError):
+ _ = typ.date_get(datetime.datetime.now())
+
+ # Test fail on datetime.time
+ with self.assertRaises(TypeError):
+ _ = typ.date_get(datetime.datetime.now().time())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.date_get("11.11.1111")
def test_time(self):
+ self.assertIsInstance(typ.time_get(None), datetime.time)
+ self.assertIsInstance(typ.time_get(""), datetime.time)
+
+ re = "^[0-5][0-9]:[0-5][0-9]:[0-5][0-9]$"
+ self.assertRegexpMatches(typ.time_get(None).strftime(typ.FORMAT_TIME), re)
+ self.assertRegexpMatches(typ.time_get("").strftime(typ.FORMAT_TIME), re)
+
time = datetime.time(12, 34, 56)
time_string = '12:34:56'
self.assertEqual(time, typ.time_get(time_string))
- self.assertEqual(typ.time_set(time), time_string)
+ self.assertEqual(time, typ.time_get(time))
+
+ with self.assertRaises(TypeError):
+ _ = typ.time_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.time_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(False)
+
+ # Test fail on datetime.datetime
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(datetime.datetime.now())
+
+ # Test fail on datetime.date
+ with self.assertRaises(TypeError):
+ _ = typ.time_get(datetime.datetime.now().date())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.time_get("11-11-11")
def test_datetime(self):
+ self.assertIsInstance(typ.datetime_get(None), datetime.datetime)
+ self.assertIsInstance(typ.datetime_get(""), datetime.datetime)
+
+ re = "^[0-9]{4}-(0[1-9]|1[0-2])-([0-2][0-9]|3[0-1]) " \
+ "[0-5][0-9]:[0-5][0-9]:[0-5][0-9]$"
+ self.assertRegexpMatches(typ.datetime_get(None).strftime(typ.FORMAT_DATETIME), re)
+ self.assertRegexpMatches(typ.datetime_get("").strftime(typ.FORMAT_DATETIME), re)
+
date = datetime.datetime(2011, 12, 1, 12, 34, 56)
date_string = '2011-12-01 12:34:56'
self.assertEqual(date, typ.datetime_get(date_string))
- self.assertEqual(typ.datetime_set(date), date_string)
+ self.assertEqual(date, typ.datetime_get(date))
+
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get({})
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(False)
+
+ # Test fail on datetime.time
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(datetime.datetime.now().time())
+
+ # Test fail on datetime.date
+ with self.assertRaises(TypeError):
+ _ = typ.datetime_get(datetime.datetime.now().date())
+
+ # Test fail on invalid string format
+ with self.assertRaises(ValueError):
+ _ = typ.datetime_get("11.11.1111 12:12:12")
def test_int(self):
- p = odml.Property("test", value="123456789012345678901", dtype="int")
- self.assertEqual(p.value[0], 123456789012345678901)
- p = odml.Property("test", value="-123456789012345678901", dtype="int")
- self.assertEqual(p.value[0], -123456789012345678901)
- p = odml.Property("test", value="123.45", dtype="int")
- self.assertEqual(p.value[0], 123)
+ self.assertEqual(typ.default_values("int"), typ.int_get(None))
+ self.assertEqual(typ.default_values("int"), typ.int_get(""))
+
+ self.assertIsInstance(typ.int_get(11), int)
+ self.assertIsInstance(typ.int_get(1.1), int)
+ self.assertIsInstance(typ.int_get("11"), int)
+ self.assertEqual(typ.int_get("123456789012345678901"), 123456789012345678901)
+ self.assertEqual(typ.int_get("-123456789012345678901"), -123456789012345678901)
+ self.assertEqual(typ.int_get("123.45"), 123)
+
+ with self.assertRaises(TypeError):
+ _ = typ.int_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.int_get({})
+ with self.assertRaises(ValueError):
+ _ = typ.int_get("fail")
+
+ def test_float(self):
+ self.assertEqual(typ.default_values("float"), typ.float_get(None))
+ self.assertEqual(typ.default_values("float"), typ.float_get(""))
+
+ self.assertIsInstance(typ.float_get(1), float)
+ self.assertIsInstance(typ.float_get("1.1"), float)
+ self.assertEqual(typ.float_get(123.45), 123.45)
+
+ with self.assertRaises(TypeError):
+ _ = typ.float_get([])
+ with self.assertRaises(TypeError):
+ _ = typ.float_get({})
+ with self.assertRaises(ValueError):
+ _ = typ.float_get("fail")
def test_str(self):
- s = odml.Property(name='Name', value='Sherin')
- self.assertEqual(s.value[0], 'Sherin')
- self.assertEqual(s.dtype, 'string')
+ self.assertEqual(typ.default_values("string"), typ.str_get(None))
+ self.assertEqual(typ.default_values("string"), typ.str_get(""))
+ self.assertEqual(typ.default_values("string"), typ.str_get([]))
+ self.assertEqual(typ.default_values("string"), typ.str_get({}))
- s.value = 'Jerin'
- self.assertEqual(s.value[0], 'Jerin')
- self.assertEqual(s.dtype, 'string')
+ # Make sure boolean values are properly converted to string.
+ self.assertEqual(typ.str_get(False), 'False')
+ self.assertEqual(typ.str_get(True), 'True')
def test_bool(self):
- self.assertEqual(None, typ.boolean_get(None))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get(None))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get(""))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get([]))
+ self.assertEqual(typ.default_values("boolean"), typ.boolean_get({}))
true_values = [True, "TRUE", "true", "T", "t", "1", 1]
for val in true_values:
@@ -64,18 +171,17 @@ class TestTypes(unittest.TestCase):
typ.boolean_get(2.1)
def test_tuple(self):
- # Success test
- t = odml.Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
- tuple_value = t.value[0] # As the formed tuple is a list of list
- self.assertEqual(tuple_value[0], '39.12')
- self.assertEqual(tuple_value[1], '67.19')
+ self.assertIs(typ.tuple_get(""), None)
+ self.assertIs(typ.tuple_get(None), None)
- # Failure test. More tuple values then specified.
- with self.assertRaises(ValueError):
- t = odml.Property(name="Public-Key", value='(5689; 1254; 687)',
- dtype='2-tuple')
+ self.assertEqual(typ.tuple_get("(39.12; 67.19)"), ["39.12", "67.19"])
+
+ # Test fail on missing parenthesis.
+ with self.assertRaises(AssertionError):
+ _ = typ.tuple_get("fail")
+ # Test fail on mismatching element count and count number.
+ with self.assertRaises(AssertionError):
+ _ = typ.tuple_get("(1; 2; 3)", 2)
def test_dtype_none(self):
- t = odml.Property(name="Record", value={'name': 'Marie'})
- self.assertEqual(t.dtype, 'string')
- self.assertEqual(t.value[0], "{'name': 'Marie'}")
+ self.assertEqual(typ.get({'name': 'Marie'}), "{'name': 'Marie'}")
diff --git a/test/test_infer_type.py b/test/test_infer_type.py
index 8909f85..7f27bc4 100644
--- a/test/test_infer_type.py
+++ b/test/test_infer_type.py
@@ -11,51 +11,51 @@ class TestInferType(unittest.TestCase):
p = Property("test", value="somestring")
assert(p.dtype == "string")
if sys.version_info < (3, 0):
- assert (type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert (type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
def test_text(self):
p = Property("test", value="some\nstring")
assert(p.dtype == "text")
if sys.version_info < (3, 0):
- assert (type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert (type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
def test_int(self):
p = Property("test", value=111)
assert(p.dtype == "int")
- assert(type(p.value[0]) == int)
+ assert isinstance(p.value[0], int)
def test_float(self):
p = Property("test", value=3.14)
assert(p.dtype == "float")
- assert(type(p.value[0]) == float)
+ assert isinstance(p.value[0], float)
def test_datetime(self):
p = Property("test", value=dt.now())
assert(p.dtype == "datetime")
- assert(type(p.value[0]) == dt)
+ assert isinstance(p.value[0], dt)
def test_date(self):
p = Property("test", dt.now().date())
assert(p.dtype == "date")
- assert(type(p.value[0]) == date)
+ assert isinstance(p.value[0], date)
def test_time(self):
p = Property("test", value=dt.now().time())
assert(p.dtype == "time")
- assert(type(p.value[0]) == time)
+ assert isinstance(p.value[0], time)
def test_boolean(self):
p = Property("test", True)
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
p = Property("test", False)
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
def test_read_write(self):
doc = Document("author")
@@ -79,37 +79,37 @@ class TestInferType(unittest.TestCase):
p = new_sec.properties["strprop"]
assert(p.dtype == "string")
if sys.version_info < (3, 0):
- assert(type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert(type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
p = new_sec.properties["txtprop"]
assert(p.dtype == "text")
if sys.version_info < (3, 0):
- assert(type(p.value[0]) == unicode)
+ assert isinstance(p.value[0], unicode)
else:
- assert(type(p.value[0]) == str)
+ assert isinstance(p.value[0], str)
p = new_sec.properties["intprop"]
assert(p.dtype == "int")
- assert(type(p.value[0]) == int)
+ assert isinstance(p.value[0], int)
p = new_sec.properties["floatprop"]
assert(p.dtype == "float")
- assert(type(p.value[0]) == float)
+ assert isinstance(p.value[0], float)
p = new_sec.properties["datetimeprop"]
assert(p.dtype == "datetime")
- assert(type(p.value[0]) == dt)
+ assert isinstance(p.value[0], dt)
p = new_sec.properties["dateprop"]
assert(p.dtype == "date")
- assert(type(p.value[0]) == date)
+ assert isinstance(p.value[0], date)
p = new_sec.properties["timeprop"]
assert(p.dtype == "time")
- assert(type(p.value[0]) == time)
+ assert isinstance(p.value[0], time)
p = new_sec.properties["boolprop"]
assert(p.dtype == "boolean")
- assert(type(p.value[0]) == bool)
+ assert isinstance(p.value[0], bool)
diff --git a/test/test_property.py b/test/test_property.py
index c122f97..cbcaade 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -73,6 +73,24 @@ class TestProperty(unittest.TestCase):
p6 = Property("test", {"name": "Marie", "name":"Johanna"})
self.assertEqual(len(p6), 1)
+ # Test tuple dtype value.
+ t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
+ tuple_value = t.value[0] # As the formed tuple is a list of list
+ self.assertEqual(tuple_value[0], '39.12')
+ self.assertEqual(tuple_value[1], '67.19')
+
+ # Test invalid tuple length
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
+
+ # Test missing tuple length.
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='-tuple')
+
+ # Test invalid tuple format.
+ with self.assertRaises(ValueError):
+ _ = Property(name="Public-Key", value='5689; 1254; 687', dtype='3-tuple')
+
def test_get_set_value(self):
values = [1, 2, 3, 4, 5]
p = Property("property", value=values)
diff --git a/test/test_samplefile.py b/test/test_samplefile.py
index 92ae8ec..3bd5ec6 100644
--- a/test/test_samplefile.py
+++ b/test/test_samplefile.py
@@ -197,7 +197,7 @@ class AttributeTest(unittest.TestCase):
def test_conversion_int_to_float(self):
p = odml.Property("test", "1", dtype="int")
self.assertEqual(p.dtype, "int")
- self.assertEqual(type(p.value[0]), int)
+ self.assertIsInstance(p.value[0], int)
p.dtype = "float" # change dtype
self.assertEqual(p.dtype, "float")
self.assertEqual(p.value[0], 1.0)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libxml2-dev libxslt1-dev lib32z1-dev"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isodate==0.6.1
lxml==5.3.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
-e git+https://github.com/G-Node/python-odml.git@8953343f0f4616c0a71087d406b5f6d4a2036748#egg=odML
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
PyYAML==6.0.1
rdflib==5.0.0
six==1.17.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: python-odml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- isodate==0.6.1
- lxml==5.3.1
- pyyaml==6.0.1
- rdflib==5.0.0
- six==1.17.0
prefix: /opt/conda/envs/python-odml
| [
"test/test_dtypes.py::TestTypes::test_bool",
"test/test_dtypes.py::TestTypes::test_date",
"test/test_dtypes.py::TestTypes::test_datetime",
"test/test_dtypes.py::TestTypes::test_float",
"test/test_dtypes.py::TestTypes::test_int",
"test/test_dtypes.py::TestTypes::test_str",
"test/test_dtypes.py::TestTypes::test_time"
] | [] | [
"test/test_dtypes.py::TestTypes::test_dtype_none",
"test/test_dtypes.py::TestTypes::test_tuple",
"test/test_infer_type.py::TestInferType::test_boolean",
"test/test_infer_type.py::TestInferType::test_date",
"test/test_infer_type.py::TestInferType::test_datetime",
"test/test_infer_type.py::TestInferType::test_float",
"test/test_infer_type.py::TestInferType::test_int",
"test/test_infer_type.py::TestInferType::test_read_write",
"test/test_infer_type.py::TestInferType::test_string",
"test/test_infer_type.py::TestInferType::test_text",
"test/test_infer_type.py::TestInferType::test_time",
"test/test_property.py::TestProperty::test_bool_conversion",
"test/test_property.py::TestProperty::test_dtype",
"test/test_property.py::TestProperty::test_get_set_value",
"test/test_property.py::TestProperty::test_merge",
"test/test_property.py::TestProperty::test_name",
"test/test_property.py::TestProperty::test_parent",
"test/test_property.py::TestProperty::test_path",
"test/test_property.py::TestProperty::test_set_id",
"test/test_property.py::TestProperty::test_str_to_int_convert",
"test/test_property.py::TestProperty::test_value",
"test/test_property.py::TestProperty::test_value_origin",
"test/test_samplefile.py::SampleFileCreatorTest::test_samplefile",
"test/test_samplefile.py::SampleFileOperationTest::test_find_key",
"test/test_samplefile.py::SampleFileOperationTest::test_restore",
"test/test_samplefile.py::SampleFileOperationTest::test_save",
"test/test_samplefile.py::SampleFileOperationTest::test_xml_writer_version",
"test/test_samplefile.py::AttributeTest::test_conversion_float_to_int",
"test/test_samplefile.py::AttributeTest::test_conversion_int_to_float",
"test/test_samplefile.py::AttributeTest::test_value_float",
"test/test_samplefile.py::AttributeTest::test_value_int",
"test/test_samplefile.py::CopyTest::test_dependence",
"test/test_samplefile.py::CopyTest::test_independence",
"test/test_samplefile.py::MiscTest::test_findall_related",
"test/test_samplefile.py::MiscTest::test_get_property_by_path",
"test/test_samplefile.py::MiscTest::test_get_section_by_path",
"test/test_samplefile.py::MiscTest::test_paths",
"test/test_samplefile.py::MiscTest::test_reorder_first",
"test/test_samplefile.py::MiscTest::test_reorder_post",
"test/test_samplefile.py::MiscTest::test_save_version",
"test/test_samplefile.py::MiscTest::test_section_path"
] | [] | BSD 4-Clause "Original" or "Old" License | 2,299 | 2,456 | [
"odml/doc.py",
"odml/dtypes.py",
"odml/tools/dict_parser.py",
"odml/tools/odmlparser.py",
"odml/tools/xmlparser.py"
] |
|
UBC-MDS__PyPunisher-72 | 859c2f19db06c3bb7b488645f65dd286a1ba2a65 | 2018-03-17 05:02:23 | 859c2f19db06c3bb7b488645f65dd286a1ba2a65 | diff --git a/pypunisher/__init__.py b/pypunisher/__init__.py
index 97d4631..0730e06 100644
--- a/pypunisher/__init__.py
+++ b/pypunisher/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
PyPunisher
==========
diff --git a/pypunisher/_checks.py b/pypunisher/_checks.py
index a268a99..ad8a08f 100644
--- a/pypunisher/_checks.py
+++ b/pypunisher/_checks.py
@@ -1,8 +1,8 @@
-"""
-
- Checks
- ~~~~~~
+#!/usr/bin/env python
+"""
+Checks
+======
"""
import numpy as np
@@ -48,20 +48,21 @@ def input_checks(locals_):
"""
# Sort so that the order of the parameter name
# are in a reliable (alphabetical) order.
- param_a, param_b = sorted(k for k, p in locals_.items() if k != 'self')
- locals_non_non = {k: v for k, v in locals_.items()
- if v is not None and k != 'self'}
+ ALLOWED = ('min_change', 'n_features')
+ param_a, param_b = sorted(k for k, p in locals_.items() if k in ALLOWED)
+ locals_non_none = {k: v for k, v in locals_.items()
+ if v is not None and k in ALLOWED}
- if len(locals_non_non) != 1:
+ if len(locals_non_none) != 1:
raise TypeError(
"At least one of `{a}` and `{b}` must be None.".format(
a=param_a, b=param_b
)
)
- # Unpack the single key and value pair
- name, obj = tuple(locals_non_non.items())[0]
- if obj is None and not isinstance(obj, (int, float)):
+ # Unpack the single key and value pair.
+ name, obj = tuple(locals_non_none.items())[0]
+ if not isinstance(obj, (int, float)):
raise TypeError(
"`{}` must be of type int or float.".format(name)
)
diff --git a/pypunisher/metrics/__init__.py b/pypunisher/metrics/__init__.py
index 0ef76a8..8fe54ef 100644
--- a/pypunisher/metrics/__init__.py
+++ b/pypunisher/metrics/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
Metrics
=======
diff --git a/pypunisher/metrics/criterion.py b/pypunisher/metrics/criterion.py
index 91c5150..70e599b 100644
--- a/pypunisher/metrics/criterion.py
+++ b/pypunisher/metrics/criterion.py
@@ -1,8 +1,8 @@
-"""
-
- Information Criterion
- ~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+ Information Criterion
+ =====================
"""
from numpy import log, ndarray, pi
from pypunisher._checks import model_check
diff --git a/pypunisher/selection_engines/__init__.py b/pypunisher/selection_engines/__init__.py
index 65479f8..aac2dd9 100644
--- a/pypunisher/selection_engines/__init__.py
+++ b/pypunisher/selection_engines/__init__.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
"""
Selection Engines
=================
diff --git a/pypunisher/selection_engines/_utils.py b/pypunisher/selection_engines/_utils.py
index 9315f08..013198a 100644
--- a/pypunisher/selection_engines/_utils.py
+++ b/pypunisher/selection_engines/_utils.py
@@ -1,8 +1,8 @@
-"""
-
- Utils
- ~~~~~
+#!/usr/bin/env python
+"""
+Utils
+=====
"""
def get_n_features(matrix, min_=2):
@@ -47,8 +47,7 @@ def enforce_use_of_all_cpus(model):
exists
"""
- if hasattr(model, 'n_jobs'):
- setattr(model, 'n_jobs', -1)
+ setattr(model, 'n_jobs', -1)
return model
diff --git a/pypunisher/selection_engines/selection.py b/pypunisher/selection_engines/selection.py
index 067a3f5..561aec7 100644
--- a/pypunisher/selection_engines/selection.py
+++ b/pypunisher/selection_engines/selection.py
@@ -1,8 +1,8 @@
-"""
-
- Forward and Backward Selection Algorithms
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Forward and Backward Selection Algorithms
+=========================================
"""
from pypunisher.metrics.criterion import aic, bic
from pypunisher._checks import model_check, array_check, input_checks
@@ -93,6 +93,27 @@ class Selection(object):
score = self._model.score(X_val, y_val)
return score
+ @staticmethod
+ def _do_not_skip(kwargs):
+ """Check for skipping override by looking
+ for `_do_not_skip` in keyword arguments
+ If it is present, the loops in the algorithms
+ will be run to exhaustion.
+
+ Args:
+ kwargs : dict
+ Keyword Args
+
+ Returns:
+ Bool
+ If `_do_not_skip` is not present
+ or `_do_not_skip` is present and is True.
+ Otherwise, the value of `do_not_skip`
+ is returned.
+
+ """
+ return kwargs.get('_do_not_skip', True)
+
def _forward_break_criteria(self, S, min_change, best_j_score,
j_score_dict, n_features):
"""Check if `forward()` should break
@@ -128,7 +149,7 @@ class Selection(object):
else:
return False
- def forward(self, min_change=0.5, n_features=None):
+ def forward(self, min_change=0.5, n_features=None, **kwargs):
"""Perform Forward Selection on a Sklearn model.
Args:
@@ -140,6 +161,10 @@ class Selection(object):
Note: `min_change` must be None in order for `n_features` to operate.
Floats will be regarded as proportions of the total
that must lie on (0, 1).
+ kwargs : Keyword Args
+ Includes:
+ * `_do_not_skip`: for interal use only; it is
+ not recommended that users use this parameter.
Returns:
S : list
@@ -150,14 +175,18 @@ class Selection(object):
S = list()
best_score = None
itera = list(range(self._total_number_of_features))
+ do_not_skip = self._do_not_skip(kwargs)
- if n_features:
+ if n_features and do_not_skip:
n_features = parse_n_features(n_features, total=len(itera))
for i in range(self._total_number_of_features):
if self._verbose:
print("Iteration: {}".format(i))
+ if not do_not_skip:
+ continue
+
# 1. Find best feature, j, to add.
j_score_dict = dict()
for j in itera:
@@ -182,7 +211,7 @@ class Selection(object):
return S
- def backward(self, n_features=0.5, min_change=None):
+ def backward(self, n_features=0.5, min_change=None, **kwargs):
"""Perform Backward Selection on a Sklearn model.
Args:
@@ -194,6 +223,14 @@ class Selection(object):
min_change : int or float, optional
The smallest change to be considered significant.
`n_features` must be None for `min_change` to operate.
+ kwargs : Keyword Args
+ Includes:
+ * `_do_not_skip` : bool
+ Explore loop exhaustion.
+ **For internal use only**; Not intended for outside use.
+ * `_last_score_punt` : bool
+ Relax `defeated_last_iter_score` decision boundary.
+ **For internal use only**. Not intended for outside use.
Returns:
S : list
@@ -205,8 +242,10 @@ class Selection(object):
"""
input_checks(locals())
S = list(range(self._total_number_of_features)) # start with all features
+ do_not_skip = self._do_not_skip(kwargs)
+ last_score_punt = kwargs.get('_last_score_punt', False)
- if n_features:
+ if n_features and do_not_skip:
n_features = parse_n_features(n_features, total=len(S))
last_iter_score = self._fit_and_score(S, feature=None, algorithm='backward')
@@ -215,6 +254,9 @@ class Selection(object):
if self._verbose:
print("Iteration: {}".format(i))
+ if not do_not_skip:
+ continue
+
# 1. Hunt for the least predictive feature.
best = {'feature': None, 'score': None, 'defeated_last_iter_score': True}
for j in S:
@@ -228,13 +270,13 @@ class Selection(object):
if isinstance(n_features, int):
S.remove(to_drop) # blindly drop.
last_iter_score = best_new_score
- if len(S) == n_features:
- break
- else:
+ if not len(S) == n_features:
continue # i.e., ignore criteria below.
+ else:
+ break
# 2b. Halt if the change is not longer considered significant.
- if isinstance(min_change, (int, float)):
- if best['defeated_last_iter_score']:
+ else:
+ if best['defeated_last_iter_score'] or last_score_punt:
if (best_new_score - last_iter_score) < min_change:
break # there was a change, but it was not large enough.
else:
@@ -243,8 +285,4 @@ class Selection(object):
else:
break
- # 2c. Halt if only one feature remains.
- if len(S) == 1:
- break
-
return S
| Feedback on Milestone 2
Hi All,
Nice work for milestone 2. I like your comprehensive designs for the entire package. Here is my comments:
1. Good Practice to state out installation requires python 3.6
2. I like your coverage part to detail your test coverage, excellent
3. For __init__.py, line 4 and 5, why not just list.append() to have version number?
4. For selection_engines/__init__.py, I like your comments for the issue in scipy
5. Please improve your style in Python programming, you can refer to https://google.github.io/styleguide/pyguide.html as in selection.py line 4, this is not professional, also the space you have between lines are not equal.
6. For your first line of your python file, it is suggested that you can include #!/usr/bin/env python just in case the user is running your code in Linux(like me)
7. for _fit_and_score(self, S, feature, algorithm), what if algorithm input is wrong input parameters? if you thought about that?
8. For function backward(), S is not a good naming for a list
Regards
Jason | UBC-MDS/PyPunisher | diff --git a/tests/__init__.py b/tests/__init__.py
index e69de29..4265cc3 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -0,0 +1,1 @@
+#!/usr/bin/env python
diff --git a/tests/_defaults.py b/tests/_defaults.py
index b9a31c4..ddfc126 100644
--- a/tests/_defaults.py
+++ b/tests/_defaults.py
@@ -1,8 +1,8 @@
-"""
+#!/usr/bin/env python
+"""
Default Base for Testing Against
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
+ ================================
"""
from sklearn.linear_model import LinearRegression
from tests._test_data import X_train, y_train, X_val, y_val
diff --git a/tests/_test_data.py b/tests/_test_data.py
index 5b74671..adc5f13 100644
--- a/tests/_test_data.py
+++ b/tests/_test_data.py
@@ -1,31 +1,41 @@
-"""
-
- Test Data
- ~~~~~~~~~
-
- Generate: y = x + e, where e ~ Uniform(0, 50) and
- `x` is embedded as the middle column in a zero matrix.
- That is, only ONE column is predictive of y, the rest are
- trivial column vectors.
+#!/usr/bin/env python
+"""
+Test Data
+=========
+Generate: y = x + e, where e ~ Uniform(0, 50) and
+`x` is embedded as the middle column in a zero matrix.
+That is, only ONE column is predictive of y, the rest are
+trivial column vectors.
+
+X_train : 2D array
+ Training Features.
+X_val : 2D array
+ Validation Features.
+y_train : 1D array
+ Training labels.
+y_val : 1D array
+ Validation Labels
+true_best_feature : int, list
+ Denotes the best feature
+ that is actually predictive of the response.
"""
import numpy as np
from sklearn.model_selection import train_test_split
SEED = 99
-
-features = 20
-obs = 501
-middle_feature = features // 2
+FEATURES = 20
+OBSERVATIONS = 501
+middle_feature = FEATURES // 2
np.random.seed(SEED)
-X = np.zeros((obs, features))
-y = np.arange(obs)
-X[:, middle_feature] = y + np.random.uniform(0, 50, size=obs)
+X = np.zeros((OBSERVATIONS, FEATURES))
+y = np.arange(OBSERVATIONS)
+X[:, middle_feature] = y + np.random.uniform(0, 50, size=OBSERVATIONS)
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=SEED)
-TRUE_BEST_FEATURE = middle_feature
+true_best_feature = middle_feature
# Visualize ---
# import matplotlib.pyplot as plt
diff --git a/tests/_wrappers.py b/tests/_wrappers.py
index e4404f9..2dcf69d 100644
--- a/tests/_wrappers.py
+++ b/tests/_wrappers.py
@@ -1,8 +1,8 @@
-"""
-
- Wrapper Functions for Testing
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Wrapper Functions for Testing
+=============================
"""
from copy import deepcopy
from tests._defaults import DEFAULT_SELECTION_PARAMS
diff --git a/tests/test_backward_selection.py b/tests/test_backward_selection.py
index d44f603..cdb8b2a 100644
--- a/tests/test_backward_selection.py
+++ b/tests/test_backward_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Tests Specific to Backward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Tests Specific to Backward Selection
+====================================
"""
import os
import sys
@@ -13,33 +13,88 @@ sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from tests._wrappers import backward
+from tests._test_data import X_train
+
+# -----------------------------------------------------------------------------
+# Test `backward()` Params
+# -----------------------------------------------------------------------------
-def test_backward_params():
+
+def test_n_features_greater_than_zero_backward():
"""
- Check parameters to `backward()` raise when expected.
+ Check `backward()`'s `n_features` raises when
+ not greater than zero
"""
msg = "`n_features` must be greater than zero."
with pytest.raises(ValueError, match=msg):
backward(n_features=-0.5, min_change=None)
+
+def test_min_change_greater_than_zero_backward():
+ """
+ Check `backward()`'s `min_change` raises when
+ not greater than zero
+ """
msg = "`min_change` must be greater than zero."
with pytest.raises(ValueError, match=msg):
backward(n_features=None, min_change=-0.75)
+
+def test_min_change_fails_on_string_backward():
+ """
+ Check that backward raises when passed a string
+ for `min_change`.
+ """
+ msg = "`min_change` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ backward(min_change='-0.75', n_features=None)
+
+
+def test_n_features_fails_on_string_backward():
+ """
+ Check that backward raises when passed a string
+ for `n_features`.
+ """
+ msg = "`n_features` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ backward(min_change=None, n_features='-0.75')
+
+
+def test_both_non_none_backward():
+ """
+ Check `backward()` raise when at least one
+ of `min_change` or `n_features` are not None.
+ """
# Note: items in backticks (``) will be in alphabetical order.
msg = "At least one of `min_change` and `n_features` must be None."
with pytest.raises(TypeError, match=msg):
backward(n_features=0.5, min_change=0.3)
-
- msg = "`criterion` must be one of: None, 'aic', 'bic'."
- with pytest.raises(ValueError, match=msg):
- backward(n_features=0.5, criterion='acc')
+
+def test_float_greater_than_one_raises_backward():
+ """
+ Test that float values not on (0, 1) raise.
+ """
msg = "^If a float, `n_features` must be on"
with pytest.raises(ValueError, match=msg):
backward(n_features=1.5)
-
+
+
+def test_min_features_requirement_backward():
+ """
+ Check that the requirement that at least
+ two features must be present.
+ """
msg = "less than 2 features present."
with pytest.raises(IndexError, match=msg):
- backward(X_train=ones((501, 1)), X_val=ones((501, 1)))
\ No newline at end of file
+ backward(X_train=ones((501, 1)), X_val=ones((501, 1)))
+
+
+# -----------------------------------------------------------------------------
+# Test Exhausting loop
+# -----------------------------------------------------------------------------
+
+def test_loop_exhaust():
+ """Text Exhausting backward()'s loop."""
+ backward(n_features=X_train.shape[-1], min_change=None, _do_not_skip=False)
diff --git a/tests/test_criterion.py b/tests/test_criterion.py
index 9e2b8ff..2773219 100644
--- a/tests/test_criterion.py
+++ b/tests/test_criterion.py
@@ -1,8 +1,8 @@
-"""
-
- Criterion Tests
- ~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Criterion Tests
+===============
"""
import os
import sys
@@ -15,6 +15,7 @@ import statsmodels.api as sm
from pypunisher.metrics.criterion import aic, bic
from sklearn.linear_model import LinearRegression
from tests._test_data import X_train, y_train
+from tests._wrappers import forward, backward
COMP_TOLERANCE = 200 # comparision tolerance between floats
@@ -49,6 +50,22 @@ def test_metric_model_param():
metric(kind, X_train=X_train, y_train=y_train)
+# -----------------------------------------------------------------------------
+# Test criterion through selection
+# -----------------------------------------------------------------------------
+
+
+def test_selection_class_use_of_criterion():
+ """Test Criterion through `forward()` and `backward()."""
+
+ msg = "`criterion` must be one of: None, 'aic', 'bic'."
+ with pytest.raises(ValueError, match=msg):
+ forward(min_change=0.5, criterion='acc')
+
+ with pytest.raises(ValueError, match=msg):
+ backward(n_features=0.5, criterion='Santa')
+
+
# -----------------------------------------------------------------------------
# `data` Param
# -----------------------------------------------------------------------------
@@ -68,6 +85,7 @@ def test_metric_data_param():
else:
metric(sk_model, X_train=kind, y_train=y_train)
+
# -----------------------------------------------------------------------------
# Metric output
# -----------------------------------------------------------------------------
diff --git a/tests/test_forward_selection.py b/tests/test_forward_selection.py
index fa62b01..93d27f0 100644
--- a/tests/test_forward_selection.py
+++ b/tests/test_forward_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Tests Specific to Forward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Tests Specific to Forward Selection
+===================================
"""
import os
import sys
@@ -12,25 +12,68 @@ sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
from tests._wrappers import forward
+from tests._test_data import X_train
+
+
+# -----------------------------------------------------------------------------
+# Test `forward()` Params
+# -----------------------------------------------------------------------------
+
+
+def test_n_features_greater_than_zero_forward():
+ """
+ Check that `n_features` is required to be > 0.
+ """
+ msg = "`n_features` must be greater than zero."
+ with pytest.raises(ValueError, match=msg):
+ forward(min_change=None, n_features=-0.75)
-def test_forward_params():
+def test_min_change_greater_than_zero_forward():
"""
- Check parameters to `forward()` raise when expected.
+ Check that `min_change` is required to be > 0.
"""
msg = "`min_change` must be greater than zero."
with pytest.raises(ValueError, match=msg):
forward(min_change=-0.5, n_features=None)
- msg = "`n_features` must be greater than zero."
- with pytest.raises(ValueError, match=msg):
- forward(min_change=None, n_features=-0.75)
+def test_n_features_fails_on_string_forward():
+ """
+ Check that forward raises when passed a string
+ for `n_features`.
+ """
+ msg = "`n_features` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ forward(min_change=None, n_features='-0.75')
+
+
+def test_min_change_fails_on_string_forward():
+ """
+ Check that forward raises when passed a string
+ for `min_change`.
+ """
+ msg = "`min_change` must be of type int or float."
+ with pytest.raises(TypeError, match=msg):
+ forward(min_change='-0.75', n_features=None)
+
+
+def test_both_non_none_forward():
+ """
+ Check `forward()` raise when at least one
+ of `min_change` or `n_features` are not None.
+ """
# Note: items in backticks (``) will be in alphabetical order.
msg = "At least one of `min_change` and `n_features` must be None."
with pytest.raises(TypeError, match=msg):
forward(min_change=0.5, n_features=0.3)
-
- msg = "`criterion` must be one of: None, 'aic', 'bic'."
- with pytest.raises(ValueError, match=msg):
- forward(min_change=0.5, criterion='acc')
+
+
+# -----------------------------------------------------------------------------
+# Test Exhausting loop
+# -----------------------------------------------------------------------------
+
+def test_loop_exhaust():
+ """Text Exhausting forwards()'s loop."""
+ # Should not raise.
+ forward(n_features=X_train.shape[-1], min_change=None, _do_not_skip=False)
diff --git a/tests/test_selection.py b/tests/test_selection.py
index e3ee45a..2eb6098 100644
--- a/tests/test_selection.py
+++ b/tests/test_selection.py
@@ -1,8 +1,8 @@
-"""
-
- Run Tests Common to Forward and Backward Selection
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#!/usr/bin/env python
+"""
+Run Tests Common to Forward and Backward Selection
+==================================================
"""
import os
import sys
@@ -14,7 +14,7 @@ sys.path.insert(0, os.path.abspath("../"))
from pypunisher import Selection
from tests._wrappers import forward, backward
-from tests._test_data import TRUE_BEST_FEATURE
+from tests._test_data import true_best_feature
from tests._defaults import DEFAULT_SELECTION_PARAMS
@@ -75,6 +75,18 @@ def test_sklearn_model_methods():
with pytest.raises(AttributeError):
Selection(**d)
+# -----------------------------------------------------------------------------
+# Test Multiple Features
+# -----------------------------------------------------------------------------
+
+
+def test_passing_significant_change():
+ """
+ Test cases where there is a significant `min_change`
+ during backward selection.
+ """
+ backward(n_features=None, min_change=1, _last_score_punt=True)
+
# -----------------------------------------------------------------------------
# Outputs: Run the Forward and Backward Selection Algorithms
@@ -88,7 +100,7 @@ forward_output += forward(n_features=1, min_change=None)
# Force the backward selection algorithm to
# select the single feature it thinks is most predictive.
# If implemented correctly, `backward()` should be able to
-# identify `TRUE_BEST_FEATURE` as predictive.
+# identify `true_best_feature` as predictive.
backward_output = backward(n_features=1)
# Run using the other parameter option
@@ -142,7 +154,7 @@ def output_values(output):
in the contrived data.
"""
msg = "The algorithm failed to select the predictive feature."
- assert TRUE_BEST_FEATURE in output, msg
+ assert true_best_feature in output, msg
def test_fsel_output_values():
@@ -206,4 +218,4 @@ def test_fsel_verbose_output():
def test_bsel_verbose_output():
backward_output = backward(n_features=2, min_change=None, verbose=True)
- assert len(backward_output) >= 1
\ No newline at end of file
+ assert len(backward_output) >= 1
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 7
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==3.3.9
certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
dill==0.3.9
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
isort==6.0.1
joblib==1.4.2
mccabe==0.7.0
numpy==2.0.2
packaging==24.2
pandas==2.2.3
patsy==1.0.1
platformdirs==4.3.7
pluggy==1.5.0
pylint==3.3.6
-e git+https://github.com/UBC-MDS/PyPunisher.git@859c2f19db06c3bb7b488645f65dd286a1ba2a65#egg=pypunisher
pytest==8.3.5
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.32.3
scikit-learn==1.6.1
scipy==1.13.1
six==1.17.0
statsmodels==0.14.4
threadpoolctl==3.6.0
tomli==2.2.1
tomlkit==0.13.2
tqdm==4.67.1
typing_extensions==4.13.0
tzdata==2025.2
urllib3==2.3.0
| name: PyPunisher
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==3.3.9
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- dill==0.3.9
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- isort==6.0.1
- joblib==1.4.2
- mccabe==0.7.0
- numpy==2.0.2
- packaging==24.2
- pandas==2.2.3
- patsy==1.0.1
- platformdirs==4.3.7
- pluggy==1.5.0
- pylint==3.3.6
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.32.3
- scikit-learn==1.6.1
- scipy==1.13.1
- six==1.17.0
- statsmodels==0.14.4
- threadpoolctl==3.6.0
- tomli==2.2.1
- tomlkit==0.13.2
- tqdm==4.67.1
- typing-extensions==4.13.0
- tzdata==2025.2
- urllib3==2.3.0
prefix: /opt/conda/envs/PyPunisher
| [
"tests/test_backward_selection.py::test_min_change_fails_on_string_backward",
"tests/test_backward_selection.py::test_n_features_fails_on_string_backward",
"tests/test_backward_selection.py::test_loop_exhaust",
"tests/test_forward_selection.py::test_n_features_fails_on_string_forward",
"tests/test_forward_selection.py::test_min_change_fails_on_string_forward",
"tests/test_forward_selection.py::test_loop_exhaust",
"tests/test_selection.py::test_passing_significant_change"
] | [] | [
"tests/test_backward_selection.py::test_n_features_greater_than_zero_backward",
"tests/test_backward_selection.py::test_min_change_greater_than_zero_backward",
"tests/test_backward_selection.py::test_both_non_none_backward",
"tests/test_backward_selection.py::test_float_greater_than_one_raises_backward",
"tests/test_backward_selection.py::test_min_features_requirement_backward",
"tests/test_criterion.py::test_metric_model_param",
"tests/test_criterion.py::test_selection_class_use_of_criterion",
"tests/test_criterion.py::test_metric_data_param",
"tests/test_criterion.py::test_metric_output",
"tests/test_criterion.py::test_metric_output_value",
"tests/test_forward_selection.py::test_n_features_greater_than_zero_forward",
"tests/test_forward_selection.py::test_min_change_greater_than_zero_forward",
"tests/test_forward_selection.py::test_both_non_none_forward",
"tests/test_selection.py::test_input_types",
"tests/test_selection.py::test_too_few_features",
"tests/test_selection.py::test_sklearn_model_methods",
"tests/test_selection.py::test_fsel_output_type",
"tests/test_selection.py::test_bsel_output_type",
"tests/test_selection.py::test_n_features",
"tests/test_selection.py::test_fsel_output_values",
"tests/test_selection.py::test_bsel_output_values",
"tests/test_selection.py::test_fsel_aic_output",
"tests/test_selection.py::test_fsel_bic_output",
"tests/test_selection.py::test_bsel_aic_output",
"tests/test_selection.py::test_bsel_bic_output",
"tests/test_selection.py::test_fsel_min_change_output",
"tests/test_selection.py::test_bsel_min_change_output",
"tests/test_selection.py::test_fsel_verbose_output",
"tests/test_selection.py::test_bsel_verbose_output"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,301 | 2,473 | [
"pypunisher/__init__.py",
"pypunisher/_checks.py",
"pypunisher/metrics/__init__.py",
"pypunisher/metrics/criterion.py",
"pypunisher/selection_engines/__init__.py",
"pypunisher/selection_engines/_utils.py",
"pypunisher/selection_engines/selection.py"
] |
|
dask__dask-3301 | 11a50f0d329bdaf1ea6b7f0cff9500f55699fd36 | 2018-03-19 16:23:38 | 48c4a589393ebc5b335cc5c7df291901401b0b15 | jakirkham: LGTM. Thanks @martindurant.
Were you running into some bad behavior because of this or was it just slow?
martindurant: @jakirkham , this is specifically in response to #3248 . Something more sophisticated might be useful in any more complicated case, to minimise the calls to astype.
jakirkham: Thanks for the info.
Would expect that endianness would be preserved by `promote_types`. Though resolving conflicts between different endians is less clear (and may be at the crux of this issue).
Didn't see anything that matched on the NumPy issue tracker so raised as issue ( https://github.com/numpy/numpy/issues/10767 ). | diff --git a/dask/array/core.py b/dask/array/core.py
index a4990fe95..1d3d54f75 100644
--- a/dask/array/core.py
+++ b/dask/array/core.py
@@ -1208,7 +1208,12 @@ class Array(Base):
@wraps(store)
def store(self, target, **kwargs):
- return store([self], [target], **kwargs)
+ r = store([self], [target], **kwargs)
+
+ if kwargs.get("return_stored", False):
+ r = r[0]
+
+ return r
def to_hdf5(self, filename, datapath, **kwargs):
""" Store array in HDF5 file
@@ -2611,8 +2616,12 @@ def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq]))
- dt = reduce(np.promote_types, [a.dtype for a in seq])
- seq = [x.astype(dt) for x in seq]
+ seq_dtypes = [a.dtype for a in seq]
+ if len(set(seq_dtypes)) > 1:
+ dt = reduce(np.promote_types, seq_dtypes)
+ seq = [x.astype(dt) for x in seq]
+ else:
+ dt = seq_dtypes[0]
names = [a.name for a in seq]
| optimization of array.concatenate depends strongly on endianness
I have encountered a dask optimization issue that I think is at the core of xgcm/xmitgcm#73.
Basically, I am constructing a big dask array by concatenating many numpy memmaps, each created within a `from_delayed` function. Then I want to get back out a single value from this big array. In theory this should go very fast and use very little memory, as if I had accessed the original memmap. And indeed it does...unless the dtype is big endian!
Here is how to reproduce this issue. First create some test data
```python
import numpy as np
import dask
import dask.array
# create some example binary data (8GB)
# large dataset is necessary to see timing differences
shape = (1, 50, 1000, 2000)
nfiles = 10
dtype = np.dtype('f8') # note: this doesn't matter at this stage
data = np.zeros(shape, dtype)
filenames = ['data.{:04d}'.format(n) for n in range(nfiles)]
for fname in filenames:
data.tofile(fname)
```
Now define some functions to read it into dask arrays
```python
def read_as_memmap(fname, dtype):
return np.memmap(fname, dtype=dtype, shape=shape)
def lazy_memmap(fname, dtype):
return dask.array.from_delayed(
dask.delayed(read_as_memmap)(fname, dtype),
shape, dtype)
def read_all_data(dtype):
return dask.array.concatenate(
[lazy_memmap(fname, dtype) for fname in filenames])
```
Now perform a timing test on reading back a single value with default (little endian) datatype
```python
dtype = np.dtype('f8')
all_data = read_all_data(dtype)
%timeit lazy_memmap(filenames[-1], dtype)[0, 0, 0].compute()
%timeit all_data[-1, 0, 0, 0].compute()
```
On my machine I get
```
100 loops, best of 3: 2.54 ms per loop
100 loops, best of 3: 2.3 ms per loop
```
basically identical and very fast, as we expect. The dask graph for `all_data[-1, 0, 0, 0]` looks like this:

Instead, if I repeat the test with a big-endian dtype
```python
dtype = np.dtype('>f8')
all_data = read_all_data(dtype)
%timeit lazy_memmap(filenames[-1], dtype)[0, 0, 0].compute()
%timeit all_data[-1, 0, 0, 0].compute()
```
I get this:
```
100 loops, best of 3: 2.57 ms per loop
1 loop, best of 3: 929 ms per loop
```
The `ResourceProfiler` diagnostics also indicate much higher memory usage. Now the dask graph looks like this

There appears to be an extra call to `astype` which is interfering with the optimization somehow.
I'm using dask version 0.17.1.
| dask/dask | diff --git a/dask/array/tests/test_array_core.py b/dask/array/tests/test_array_core.py
index cc233f4b4..87f420c25 100644
--- a/dask/array/tests/test_array_core.py
+++ b/dask/array/tests/test_array_core.py
@@ -335,6 +335,17 @@ def test_concatenate():
pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))
[email protected]('dtypes', [(('>f8', '>f8'), '>f8'),
+ (('<f4', '<f8'), '<f8')])
+def test_concatenate_types(dtypes):
+ dts_in, dt_out = dtypes
+ arrs = [np.zeros(4, dtype=dt) for dt in dts_in]
+ darrs = [from_array(arr, chunks=(2,)) for arr in arrs]
+
+ x = concatenate(darrs, axis=0)
+ assert x.dtype == dt_out
+
+
def test_concatenate_unknown_axes():
dd = pytest.importorskip('dask.dataframe')
pd = pytest.importorskip('pandas')
@@ -1513,6 +1524,26 @@ def test_store_locks():
assert lock.acquire_count == nchunks
+def test_store_method_return():
+ d = da.ones((10, 10), chunks=(2, 2))
+ a = d + 1
+
+ for compute in [False, True]:
+ for return_stored in [False, True]:
+ at = np.zeros(shape=(10, 10))
+ r = a.store(
+ at, get=dask.threaded.get,
+ compute=compute, return_stored=return_stored
+ )
+
+ if return_stored:
+ assert isinstance(r, Array)
+ elif compute:
+ assert r is None
+ else:
+ assert isinstance(r, Delayed)
+
+
@pytest.mark.xfail(reason="can't lock with multiprocessing")
def test_store_multiprocessing_lock():
d = da.ones((10, 10), chunks=(2, 2))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 1.21 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[complete]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"numpydoc",
"sphinx",
"sphinx_rtd_theme",
"cloudpickle",
"pandas>=0.19.0",
"distributed"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
cloudpickle==2.2.1
contextvars==2.4
coverage==6.2
-e git+https://github.com/dask/dask.git@11a50f0d329bdaf1ea6b7f0cff9500f55699fd36#egg=dask
distributed==1.21.8
docutils==0.18.1
execnet==1.9.0
HeapDict==1.0.1
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.0.3
locket==1.0.0
MarkupSafe==2.0.1
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
msgpack==1.0.5
numpy==1.19.5
numpydoc==1.1.0
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pandas==1.1.5
partd==1.2.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
six==1.17.0
snowballstemmer==2.2.0
sortedcontainers==2.4.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tblib==1.7.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
toolz==0.12.0
tornado==6.1
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
zict==2.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: dask
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- cloudpickle==2.2.1
- contextvars==2.4
- coverage==6.2
- distributed==1.21.8
- docutils==0.18.1
- execnet==1.9.0
- heapdict==1.0.1
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- jinja2==3.0.3
- locket==1.0.0
- markupsafe==2.0.1
- msgpack==1.0.5
- numpy==1.19.5
- numpydoc==1.1.0
- pandas==1.1.5
- partd==1.2.0
- psutil==7.0.0
- pygments==2.14.0
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- six==1.17.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tblib==1.7.0
- tomli==1.2.3
- toolz==0.12.0
- tornado==6.1
- urllib3==1.26.20
- zict==2.1.0
prefix: /opt/conda/envs/dask
| [
"dask/array/tests/test_array_core.py::test_concatenate_types[dtypes0]",
"dask/array/tests/test_array_core.py::test_store_method_return"
] | [
"dask/array/tests/test_array_core.py::test_field_access",
"dask/array/tests/test_array_core.py::test_field_access_with_shape",
"dask/array/tests/test_array_core.py::test_matmul",
"dask/array/tests/test_array_core.py::test_from_array_names"
] | [
"dask/array/tests/test_array_core.py::test_getem",
"dask/array/tests/test_array_core.py::test_top",
"dask/array/tests/test_array_core.py::test_top_supports_broadcasting_rules",
"dask/array/tests/test_array_core.py::test_top_literals",
"dask/array/tests/test_array_core.py::test_atop_literals",
"dask/array/tests/test_array_core.py::test_concatenate3_on_scalars",
"dask/array/tests/test_array_core.py::test_chunked_dot_product",
"dask/array/tests/test_array_core.py::test_chunked_transpose_plus_one",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions_works_with_singleton_dimensions",
"dask/array/tests/test_array_core.py::test_broadcast_dimensions",
"dask/array/tests/test_array_core.py::test_Array",
"dask/array/tests/test_array_core.py::test_uneven_chunks",
"dask/array/tests/test_array_core.py::test_numblocks_suppoorts_singleton_block_dims",
"dask/array/tests/test_array_core.py::test_keys",
"dask/array/tests/test_array_core.py::test_Array_computation",
"dask/array/tests/test_array_core.py::test_stack",
"dask/array/tests/test_array_core.py::test_short_stack",
"dask/array/tests/test_array_core.py::test_stack_scalars",
"dask/array/tests/test_array_core.py::test_stack_promote_type",
"dask/array/tests/test_array_core.py::test_stack_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate",
"dask/array/tests/test_array_core.py::test_concatenate_types[dtypes1]",
"dask/array/tests/test_array_core.py::test_concatenate_unknown_axes",
"dask/array/tests/test_array_core.py::test_concatenate_rechunk",
"dask/array/tests/test_array_core.py::test_concatenate_fixlen_strings",
"dask/array/tests/test_array_core.py::test_block_simple_row_wise",
"dask/array/tests/test_array_core.py::test_block_simple_column_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_row_wise",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_multiple_rows",
"dask/array/tests/test_array_core.py::test_block_with_1d_arrays_column_wise",
"dask/array/tests/test_array_core.py::test_block_mixed_1d_and_2d",
"dask/array/tests/test_array_core.py::test_block_complicated",
"dask/array/tests/test_array_core.py::test_block_nested",
"dask/array/tests/test_array_core.py::test_block_3d",
"dask/array/tests/test_array_core.py::test_block_with_mismatched_shape",
"dask/array/tests/test_array_core.py::test_block_no_lists",
"dask/array/tests/test_array_core.py::test_block_invalid_nesting",
"dask/array/tests/test_array_core.py::test_block_empty_lists",
"dask/array/tests/test_array_core.py::test_block_tuple",
"dask/array/tests/test_array_core.py::test_binops",
"dask/array/tests/test_array_core.py::test_broadcast_shapes",
"dask/array/tests/test_array_core.py::test_elemwise_on_scalars",
"dask/array/tests/test_array_core.py::test_elemwise_with_ndarrays",
"dask/array/tests/test_array_core.py::test_elemwise_differently_chunked",
"dask/array/tests/test_array_core.py::test_elemwise_dtype",
"dask/array/tests/test_array_core.py::test_operators",
"dask/array/tests/test_array_core.py::test_operator_dtype_promotion",
"dask/array/tests/test_array_core.py::test_T",
"dask/array/tests/test_array_core.py::test_norm",
"dask/array/tests/test_array_core.py::test_broadcast_to",
"dask/array/tests/test_array_core.py::test_broadcast_to_array",
"dask/array/tests/test_array_core.py::test_broadcast_to_scalar",
"dask/array/tests/test_array_core.py::test_broadcast_to_chunks",
"dask/array/tests/test_array_core.py::test_broadcast_arrays",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape0-v_shape0]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape1-v_shape1]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape2-v_shape2]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape3-v_shape3]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape4-v_shape4]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape5-v_shape5]",
"dask/array/tests/test_array_core.py::test_broadcast_operator[u_shape6-v_shape6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape0-new_shape0-chunks0]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape1-new_shape1-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape2-new_shape2-5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape3-new_shape3-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape4-new_shape4-12]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape5-new_shape5-chunks5]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape6-new_shape6-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape7-new_shape7-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape8-new_shape8-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape9-new_shape9-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape10-new_shape10-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape11-new_shape11-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape12-new_shape12-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape13-new_shape13-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape14-new_shape14-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape15-new_shape15-2]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape16-new_shape16-chunks16]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape17-new_shape17-3]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape18-new_shape18-4]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape19-new_shape19-chunks19]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape20-new_shape20-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape21-new_shape21-1]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape22-new_shape22-24]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape23-new_shape23-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape24-new_shape24-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape25-new_shape25-6]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape26-new_shape26-chunks26]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape27-new_shape27-chunks27]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape28-new_shape28-chunks28]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape29-new_shape29-chunks29]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape30-new_shape30-chunks30]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape31-new_shape31-chunks31]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape32-new_shape32-chunks32]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape33-new_shape33-chunks33]",
"dask/array/tests/test_array_core.py::test_reshape[original_shape34-new_shape34-chunks34]",
"dask/array/tests/test_array_core.py::test_reshape_exceptions",
"dask/array/tests/test_array_core.py::test_reshape_splat",
"dask/array/tests/test_array_core.py::test_reshape_fails_for_dask_only",
"dask/array/tests/test_array_core.py::test_reshape_unknown_dimensions",
"dask/array/tests/test_array_core.py::test_full",
"dask/array/tests/test_array_core.py::test_map_blocks",
"dask/array/tests/test_array_core.py::test_map_blocks2",
"dask/array/tests/test_array_core.py::test_map_blocks_with_constants",
"dask/array/tests/test_array_core.py::test_map_blocks_with_kwargs",
"dask/array/tests/test_array_core.py::test_map_blocks_with_chunks",
"dask/array/tests/test_array_core.py::test_map_blocks_dtype_inference",
"dask/array/tests/test_array_core.py::test_from_function_requires_block_args",
"dask/array/tests/test_array_core.py::test_repr",
"dask/array/tests/test_array_core.py::test_slicing_with_ellipsis",
"dask/array/tests/test_array_core.py::test_slicing_with_ndarray",
"dask/array/tests/test_array_core.py::test_dtype",
"dask/array/tests/test_array_core.py::test_blockdims_from_blockshape",
"dask/array/tests/test_array_core.py::test_coerce",
"dask/array/tests/test_array_core.py::test_bool",
"dask/array/tests/test_array_core.py::test_store_delayed_target",
"dask/array/tests/test_array_core.py::test_store",
"dask/array/tests/test_array_core.py::test_store_regions",
"dask/array/tests/test_array_core.py::test_store_compute_false",
"dask/array/tests/test_array_core.py::test_store_locks",
"dask/array/tests/test_array_core.py::test_to_dask_dataframe",
"dask/array/tests/test_array_core.py::test_np_array_with_zero_dimensions",
"dask/array/tests/test_array_core.py::test_dtype_complex",
"dask/array/tests/test_array_core.py::test_astype",
"dask/array/tests/test_array_core.py::test_arithmetic",
"dask/array/tests/test_array_core.py::test_elemwise_consistent_names",
"dask/array/tests/test_array_core.py::test_optimize",
"dask/array/tests/test_array_core.py::test_slicing_with_non_ndarrays",
"dask/array/tests/test_array_core.py::test_getter",
"dask/array/tests/test_array_core.py::test_size",
"dask/array/tests/test_array_core.py::test_nbytes",
"dask/array/tests/test_array_core.py::test_itemsize",
"dask/array/tests/test_array_core.py::test_Array_normalizes_dtype",
"dask/array/tests/test_array_core.py::test_from_array_with_lock",
"dask/array/tests/test_array_core.py::test_from_array_tasks_always_call_getter",
"dask/array/tests/test_array_core.py::test_from_array_no_asarray",
"dask/array/tests/test_array_core.py::test_from_array_getitem",
"dask/array/tests/test_array_core.py::test_from_array_minus_one",
"dask/array/tests/test_array_core.py::test_asarray",
"dask/array/tests/test_array_core.py::test_asanyarray",
"dask/array/tests/test_array_core.py::test_from_func",
"dask/array/tests/test_array_core.py::test_concatenate3_2",
"dask/array/tests/test_array_core.py::test_map_blocks3",
"dask/array/tests/test_array_core.py::test_from_array_with_missing_chunks",
"dask/array/tests/test_array_core.py::test_normalize_chunks",
"dask/array/tests/test_array_core.py::test_raise_on_no_chunks",
"dask/array/tests/test_array_core.py::test_chunks_is_immutable",
"dask/array/tests/test_array_core.py::test_raise_on_bad_kwargs",
"dask/array/tests/test_array_core.py::test_long_slice",
"dask/array/tests/test_array_core.py::test_ellipsis_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing",
"dask/array/tests/test_array_core.py::test_point_slicing_with_full_slice",
"dask/array/tests/test_array_core.py::test_slice_with_floats",
"dask/array/tests/test_array_core.py::test_slice_with_integer_types",
"dask/array/tests/test_array_core.py::test_index_with_integer_types",
"dask/array/tests/test_array_core.py::test_vindex_basic",
"dask/array/tests/test_array_core.py::test_vindex_nd",
"dask/array/tests/test_array_core.py::test_vindex_negative",
"dask/array/tests/test_array_core.py::test_vindex_errors",
"dask/array/tests/test_array_core.py::test_vindex_merge",
"dask/array/tests/test_array_core.py::test_empty_array",
"dask/array/tests/test_array_core.py::test_memmap",
"dask/array/tests/test_array_core.py::test_to_npy_stack",
"dask/array/tests/test_array_core.py::test_view",
"dask/array/tests/test_array_core.py::test_view_fortran",
"dask/array/tests/test_array_core.py::test_map_blocks_with_changed_dimension",
"dask/array/tests/test_array_core.py::test_broadcast_chunks",
"dask/array/tests/test_array_core.py::test_chunks_error",
"dask/array/tests/test_array_core.py::test_array_compute_forward_kwargs",
"dask/array/tests/test_array_core.py::test_dont_fuse_outputs",
"dask/array/tests/test_array_core.py::test_dont_dealias_outputs",
"dask/array/tests/test_array_core.py::test_timedelta_op",
"dask/array/tests/test_array_core.py::test_to_delayed",
"dask/array/tests/test_array_core.py::test_to_delayed_optimize_graph",
"dask/array/tests/test_array_core.py::test_cumulative",
"dask/array/tests/test_array_core.py::test_atop_names",
"dask/array/tests/test_array_core.py::test_atop_new_axes",
"dask/array/tests/test_array_core.py::test_atop_kwargs",
"dask/array/tests/test_array_core.py::test_atop_chunks",
"dask/array/tests/test_array_core.py::test_from_delayed",
"dask/array/tests/test_array_core.py::test_A_property",
"dask/array/tests/test_array_core.py::test_copy_mutate",
"dask/array/tests/test_array_core.py::test_npartitions",
"dask/array/tests/test_array_core.py::test_astype_gh1151",
"dask/array/tests/test_array_core.py::test_elemwise_name",
"dask/array/tests/test_array_core.py::test_map_blocks_name",
"dask/array/tests/test_array_core.py::test_array_picklable",
"dask/array/tests/test_array_core.py::test_from_array_raises_on_bad_chunks",
"dask/array/tests/test_array_core.py::test_concatenate_axes",
"dask/array/tests/test_array_core.py::test_atop_concatenate",
"dask/array/tests/test_array_core.py::test_common_blockdim",
"dask/array/tests/test_array_core.py::test_uneven_chunks_that_fit_neatly",
"dask/array/tests/test_array_core.py::test_elemwise_uneven_chunks",
"dask/array/tests/test_array_core.py::test_uneven_chunks_atop",
"dask/array/tests/test_array_core.py::test_warn_bad_rechunking",
"dask/array/tests/test_array_core.py::test_optimize_fuse_keys",
"dask/array/tests/test_array_core.py::test_concatenate_stack_dont_warn",
"dask/array/tests/test_array_core.py::test_map_blocks_delayed",
"dask/array/tests/test_array_core.py::test_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_2d",
"dask/array/tests/test_array_core.py::test_no_chunks_yes_chunks",
"dask/array/tests/test_array_core.py::test_raise_informative_errors_no_chunks",
"dask/array/tests/test_array_core.py::test_no_chunks_slicing_2d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_1d",
"dask/array/tests/test_array_core.py::test_index_array_with_array_2d",
"dask/array/tests/test_array_core.py::test_setitem_1d",
"dask/array/tests/test_array_core.py::test_setitem_2d",
"dask/array/tests/test_array_core.py::test_setitem_errs",
"dask/array/tests/test_array_core.py::test_zero_slice_dtypes",
"dask/array/tests/test_array_core.py::test_zero_sized_array_rechunk",
"dask/array/tests/test_array_core.py::test_atop_zero_shape",
"dask/array/tests/test_array_core.py::test_atop_zero_shape_new_axes",
"dask/array/tests/test_array_core.py::test_broadcast_against_zero_shape",
"dask/array/tests/test_array_core.py::test_from_array_name",
"dask/array/tests/test_array_core.py::test_concatenate_errs",
"dask/array/tests/test_array_core.py::test_stack_errs",
"dask/array/tests/test_array_core.py::test_atop_with_numpy_arrays",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other0-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other1-6]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-100]",
"dask/array/tests/test_array_core.py::test_elemwise_with_lists[other2-6]",
"dask/array/tests/test_array_core.py::test_constructor_plugin",
"dask/array/tests/test_array_core.py::test_no_warnings_on_metadata",
"dask/array/tests/test_array_core.py::test_delayed_array_key_hygeine",
"dask/array/tests/test_array_core.py::test_empty_chunks_in_array_len",
"dask/array/tests/test_array_core.py::test_meta[None]",
"dask/array/tests/test_array_core.py::test_meta[dtype1]"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,308 | 348 | [
"dask/array/core.py"
] |
G-Node__python-odml-269 | c16f9891c4363dfcf907fd7daa076acba4cbe5eb | 2018-03-26 12:56:13 | eeff5922987b064681d1328f81af317d8171808f | diff --git a/odml/property.py b/odml/property.py
index 74e31f7..894296d 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -22,9 +22,9 @@ class BaseProperty(base.baseobject, Property):
dependency=None, dependency_value=None, dtype=None,
value_origin=None, id=None):
"""
- Create a new Property with a single value. The method will try to infer
- the value's dtype from the type of the value if not explicitly stated.
- Example for a property with
+ Create a new Property. If a value without an explicitly stated dtype
+ has been provided, the method will try to infer the value's dtype.
+ Example:
>>> p = Property("property1", "a string")
>>> p.dtype
>>> str
@@ -34,21 +34,25 @@ class BaseProperty(base.baseobject, Property):
>>> p = Property("prop", [2, 3, 4])
>>> p.dtype
>>> int
- :param name: The name of the property
- :param value: Some data value, this may be a list of homogeneous values
+ :param name: The name of the property.
+ :param value: Some data value, it can be a single value or
+ a list of homogeneous values.
:param unit: The unit of the stored data.
- :param uncertainty: the uncertainty (e.g. the standard deviation)
+ :param uncertainty: The uncertainty (e.g. the standard deviation)
associated with a measure value.
:param reference: A reference (e.g. an URL) to an external definition
of the value.
:param definition: The definition of the property.
:param dependency: Another property this property depends on.
:param dependency_value: Dependency on a certain value.
- :param dtype: the data type of the values stored in the property,
- if dtype is not given, the type is deduced from the values
+ :param dtype: The data type of the values stored in the property,
+ if dtype is not given, the type is deduced from the values.
+ Check odml.DType for supported data types.
:param value_origin: Reference where the value originated from e.g. a file name.
+ :param id: UUID string as specified in RFC 4122. If no id is provided,
+ an id will be generated and assigned. An id has to be unique
+ within an odML Document.
"""
- # TODO validate arguments
try:
if id is not None:
self._id = str(uuid.UUID(id))
@@ -84,7 +88,7 @@ class BaseProperty(base.baseobject, Property):
def new_id(self, id=None):
"""
- new_id sets the id of the current object to a RFC 4122 compliant UUID.
+ new_id sets the id of the current object to an RFC 4122 compliant UUID.
If an id was provided, it is assigned if it is RFC 4122 UUID format compliant.
If no id was provided, a new UUID is generated and assigned.
:param id: UUID string as specified in RFC 4122.
@@ -108,7 +112,7 @@ class BaseProperty(base.baseobject, Property):
@property
def dtype(self):
"""
- The data type of the value
+ The data type of the value. Check odml.DType for supported data types.
"""
return self._dtype
@@ -116,11 +120,9 @@ class BaseProperty(base.baseobject, Property):
def dtype(self, new_type):
"""
If the data type of a property value is changed, it is tried
- to convert the value to the new type.
- If this doesn't work, the change is refused.
-
- This behaviour can be overridden by directly accessing the *_dtype*
- attribute and adjusting the *data* attribute manually.
+ to convert existing values to the new type. If this doesn't work,
+ the change is refused. The dtype can always be changed, if
+ a Property does not contain values.
"""
# check if this is a valid type
if not dtypes.valid_type(new_type):
@@ -139,7 +141,7 @@ class BaseProperty(base.baseobject, Property):
@property
def parent(self):
"""
- The section containing this property
+ The section containing this property.
"""
return self._parent
@@ -170,29 +172,30 @@ class BaseProperty(base.baseobject, Property):
@property
def value(self):
"""
- Returns the value(s) stored in this property. Method always returns a list that
- is a copy (!) of the stored value. Changing this list will NOT change the property.
- For manipulation of the stored values use the append, extend, and direct access methods
- (using brackets).
+ Returns the value(s) stored in this property. Method always returns a list
+ that is a copy (!) of the stored value. Changing this list will NOT change
+ the property.
+ For manipulation of the stored values use the append, extend, and direct
+ access methods (using brackets).
For example:
- >> p = odml.Property("prop", value=[1, 2, 3])
- >> print(p.value)
+ >>> p = odml.Property("prop", value=[1, 2, 3])
+ >>> print(p.value)
[1, 2, 3]
- >> p.value.append(4)
- >> print(p.value)
+ >>> p.value.append(4)
+ >>> print(p.value)
[1, 2, 3]
Individual values can be accessed and manipulated like this:
>>> print(p[0])
[1]
- >> p[0] = 4
- >> print(p[0])
+ >>> p[0] = 4
+ >>> print(p[0])
[4]
The values can be iterated e.g. with a loop:
- >> for v in p.value:
- print(v)
+ >>> for v in p.value:
+ >>> print(v)
4
2
3
@@ -201,18 +204,18 @@ class BaseProperty(base.baseobject, Property):
def value_str(self, index=0):
"""
- Used to access typed data of the value as a string.
- Use data to access the raw type, i.e.:
+ Used to access typed data of the value at a specific
+ index position as a string.
"""
return dtypes.set(self._value[index], self._dtype)
def _validate_values(self, values):
"""
- Method ensures that the passed value(s) can be cast to the
- same dtype, i.e. that associated with this property or the
- inferred dtype of the first entry of the values list.
+ Method ensures that the passed value(s) can be cast to the
+ same dtype, i.e. that are associated with this property or the
+ inferred dtype of the first entry of the values list.
- :param values an iterable that contains the values
+ :param values: an iterable that contains the values.
"""
for v in values:
try:
@@ -227,7 +230,7 @@ class BaseProperty(base.baseobject, Property):
If new_value is a string, it will convert it to a list of
strings if the new_value contains embracing brackets.
- returns list of new_value
+ :return: list of new_value
"""
if isinstance(new_value, str):
if new_value[0] == "[" and new_value[-1] == "]":
@@ -241,21 +244,22 @@ class BaseProperty(base.baseobject, Property):
elif not isinstance(new_value, list):
new_value = [new_value]
else:
- raise ValueError("odml.Property._convert_value_input: unsupported data type for values: %s" % type(new_value))
+ raise ValueError("odml.Property._convert_value_input: "
+ "unsupported data type for values: %s" % type(new_value))
return new_value
@value.setter
def value(self, new_value):
"""
-
Set the value of the property discarding any previous information.
Method will try to convert the passed value to the dtype of
- the property and raise an ValueError, if not possible
+ the property and raise an ValueError if not possible.
- :param new_value a single value or list of values.
+ :param new_value: a single value or list of values.
"""
# Make sure boolean value 'False' gets through as well...
- if new_value is None or (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
+ if new_value is None or \
+ (isinstance(new_value, (list, tuple, str)) and len(new_value) == 0):
self._value = []
return
@@ -285,6 +289,8 @@ class BaseProperty(base.baseobject, Property):
@uncertainty.setter
def uncertainty(self, new_value):
+ if new_value == "":
+ new_value = None
self._uncertainty = new_value
@property
@@ -339,9 +345,9 @@ class BaseProperty(base.baseobject, Property):
def remove(self, value):
"""
- Remove a value from this property and unset its parent.
- Raises a TypeError if this would cause the property not to hold any
- value at all. This can be circumvented by using the *_values* property.
+ Remove a value from this property. Only the first encountered
+ occurrence of the passed in value is removed from the properties
+ list of values.
"""
if value in self._value:
self._value.remove(value)
@@ -358,6 +364,7 @@ class BaseProperty(base.baseobject, Property):
def clone(self):
"""
Clone this object to copy it independently to another document.
+ The id of the cloned object will be set to a different uuid.
"""
obj = super(BaseProperty, self).clone()
obj._parent = None
@@ -367,23 +374,23 @@ class BaseProperty(base.baseobject, Property):
return obj
def merge(self, other, strict=True):
- """Merges the property 'other' into self, if possible. Information
- will be synchronized. Method will raise an ValueError when the
+ """
+ Merges the property 'other' into self, if possible. Information
+ will be synchronized. Method will raise a ValueError when the
information in this property and the passed property are in
conflict.
- :param other a Property
- :param strict Bool value to indicate whether types should be
- implicitly converted even when information may be lost. Default is True, i.e. no conversion, and error will be raised if types do not match.
-
+ :param other: an odML Property.
+ :param strict: Bool value to indicate whether types should be implicitly converted
+ even when information may be lost. Default is True, i.e. no conversion,
+ and a ValueError will be raised if types do not match.
"""
- assert(isinstance(other, (BaseProperty)))
+ assert(isinstance(other, BaseProperty))
if strict and self.dtype != other.dtype:
raise ValueError("odml.Property.merge: src and dest dtypes do not match!")
if self.unit is not None and other.unit is not None and self.unit != other.unit:
- raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!"
- % (other.unit, self.unit))
+ raise ValueError("odml.Property.merge: src and dest units (%s, %s) do not match!" % (other.unit, self.unit))
if self.definition is not None and other.definition is not None:
self_def = ''.join(map(str.strip, self.definition.split())).lower()
@@ -422,14 +429,14 @@ class BaseProperty(base.baseobject, Property):
def unmerge(self, other):
"""
- Stub that doesn't do anything for this class
+ Stub that doesn't do anything for this class.
"""
pass
def get_merged_equivalent(self):
"""
- Return the merged object (i.e. if the section is linked to another one,
- return the corresponding property of the linked section) or None
+ Return the merged object (i.e. if the parent section is linked to another one,
+ return the corresponding property of the linked section) or None.
"""
if self.parent is None or self.parent._merged is None:
return None
@@ -466,17 +473,18 @@ class BaseProperty(base.baseobject, Property):
def extend(self, obj, strict=True):
"""
- Extend the list of values stored in this property by the passed values. Method will
- raise an ValueError, if values cannot be converted to the current dtype. One can also pass
- another Property to append all values stored in that one. In this case units must match!
+ Extend the list of values stored in this property by the passed values. Method
+ will raise a ValueError, if values cannot be converted to the current dtype.
+ One can also pass another Property to append all values stored in that one.
+ In this case units must match!
- :param obj single value, list of values or Property
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: single value, list of values or a Property.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
if isinstance(obj, BaseProperty):
- if (obj.unit != self.unit):
- raise ValueError("odml.Property.append: src and dest units (%s, %s) do not match!"
- % (obj.unit, self.unit))
+ if obj.unit != self.unit:
+ raise ValueError("odml.Property.extend: src and dest units (%s, %s) "
+ "do not match!" % (obj.unit, self.unit))
self.extend(obj.value)
return
@@ -486,29 +494,41 @@ class BaseProperty(base.baseobject, Property):
new_value = self._convert_value_input(obj)
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.extend: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
+ raise ValueError("odml.Property.extend: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
self._value.extend([dtypes.get(v, self.dtype) for v in new_value])
def append(self, obj, strict=True):
"""
- Append a single value to the list of stored values. Method will raise an ValueError if
- the passed value cannot be converted to the current dtype.
+ Append a single value to the list of stored values. Method will raise
+ a ValueError if the passed value cannot be converted to the current dtype.
- :param obj the additional value.
- :param strict a Bool that controls whether dtypes must match. Default is True.
+ :param obj: the additional value.
+ :param strict: a Bool that controls whether dtypes must match. Default is True.
"""
+ # Ignore empty values before nasty stuff happens, but make sure
+ # 0 and False get through.
+ if obj in [None, "", [], {}]:
+ return
+
+ if not self.value:
+ self.value = obj
+ return
+
new_value = self._convert_value_input(obj)
if len(new_value) > 1:
raise ValueError("odml.property.append: Use extend to add a list of values!")
+
if len(new_value) > 0 and strict and dtypes.infer_dtype(new_value[0]) != self.dtype:
- raise ValueError("odml.Property.extend: passed value data type does not match dtype!");
+ raise ValueError("odml.Property.append: "
+ "passed value data type does not match dtype!")
if not self._validate_values(new_value):
- raise ValueError("odml.Property.append: passed value(s) cannot be converted to "
- "data type \'%s\'!" % self._dtype)
- self._value.append(dtypes.get(new_value[0], self.dtype))
+ raise ValueError("odml.Property.append: passed value(s) cannot be converted "
+ "to data type \'%s\'!" % self._dtype)
+ self._value.append(dtypes.get(new_value[0], self.dtype))
| Property.append returns dtype error on unset dtype
When using `Property.append` of a Property where neither value nor dtype are set, a dtype mismatch related ValueError is raised. | G-Node/python-odml | diff --git a/test/test_property.py b/test/test_property.py
index d0dc673..f0aa976 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -10,26 +10,67 @@ class TestProperty(unittest.TestCase):
def setUp(self):
pass
+ def test_simple_attributes(self):
+ p_name = "propertyName"
+ p_origin = "from over there"
+ p_unit = "pears"
+ p_uncertainty = "+-12"
+ p_ref = "4 8 15 16 23"
+ p_def = "an odml test property"
+ p_dep = "yes"
+ p_dep_val = "42"
+
+ prop = Property(name=p_name, value_origin=p_origin, unit=p_unit,
+ uncertainty=p_uncertainty, reference=p_ref, definition=p_def,
+ dependency=p_dep, dependency_value=p_dep_val)
+
+ self.assertEqual(prop.name, p_name)
+ self.assertEqual(prop.value_origin, p_origin)
+ self.assertEqual(prop.unit, p_unit)
+ self.assertEqual(prop.uncertainty, p_uncertainty)
+ self.assertEqual(prop.reference, p_ref)
+ self.assertEqual(prop.definition, p_def)
+ self.assertEqual(prop.dependency, p_dep)
+ self.assertEqual(prop.dependency_value, p_dep_val)
+
+ # Test setting attributes
+ prop.name = "%s_edit" % p_name
+ self.assertEqual(prop.name, "%s_edit" % p_name)
+ prop.value_origin = "%s_edit" % p_origin
+ self.assertEqual(prop.value_origin, "%s_edit" % p_origin)
+ prop.unit = "%s_edit" % p_unit
+ self.assertEqual(prop.unit, "%s_edit" % p_unit)
+ prop.uncertainty = "%s_edit" % p_uncertainty
+ self.assertEqual(prop.uncertainty, "%s_edit" % p_uncertainty)
+ prop.reference = "%s_edit" % p_ref
+ self.assertEqual(prop.reference, "%s_edit" % p_ref)
+ prop.definition = "%s_edit" % p_def
+ self.assertEqual(prop.definition, "%s_edit" % p_def)
+ prop.dependency = "%s_edit" % p_dep
+ self.assertEqual(prop.dependency, "%s_edit" % p_dep)
+ prop.dependency_value = "%s_edit" % p_dep_val
+ self.assertEqual(prop.dependency_value, "%s_edit" % p_dep_val)
+
+ # Test setting attributes to None when '' is passed.
+ prop.value_origin = ""
+ self.assertIsNone(prop.value_origin)
+ prop.unit = ""
+ self.assertIsNone(prop.unit)
+ prop.uncertainty = ""
+ self.assertIsNone(prop.uncertainty)
+ prop.reference = ""
+ self.assertIsNone(prop.reference)
+ prop.definition = ""
+ self.assertIsNone(prop.definition)
+ prop.dependency = ""
+ self.assertIsNone(prop.dependency)
+ prop.dependency_value = ""
+ self.assertIsNone(prop.dependency_value)
+
def test_value(self):
p = Property("property", 100)
self.assertEqual(p.value[0], 100)
- self.assertEqual(type(p.value), list)
-
- p.append(10)
- self.assertEqual(len(p), 2)
- self.assertRaises(ValueError, p.append, [1, 2, 3])
-
- p.extend([20, 30, '40'])
- self.assertEqual(len(p), 5)
- with self.assertRaises(ValueError):
- p.append('invalid')
- with self.assertRaises(ValueError):
- p.extend(('5', 6, 7))
-
- p2 = Property("property 2", 3)
- self.assertRaises(ValueError, p.append, p2)
- p.extend(p2)
- self.assertEqual(len(p), 6)
+ self.assertIsInstance(p.value, list)
p.value = None
self.assertEqual(len(p), 0)
@@ -46,42 +87,178 @@ class TestProperty(unittest.TestCase):
p.value = ()
self.assertEqual(len(p), 0)
- p3 = Property("test", value=2, unit="Hz")
- p4 = Property("test", value=5.5, unit="s")
+ p.value.append(5)
+ self.assertEqual(len(p.value), 0)
+
+ p2 = Property("test", {"name": "Marie", "name": "Johanna"})
+ self.assertEqual(len(p2), 1)
+
+ # Test tuple dtype value.
+ t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
+ tuple_value = t.value[0] # As the formed tuple is a list of list
+ self.assertEqual(tuple_value[0], '39.12')
+ self.assertEqual(tuple_value[1], '67.19')
+ # Test invalid tuple length
with self.assertRaises(ValueError):
- p3.append(p4)
+ _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
- p.value.append(5)
- self.assertEqual(len(p.value), 0)
- self.assertRaises(ValueError, p.append, 5.5)
+ def test_value_append(self):
+ # Test append w/o Property value or dtype
+ prop = Property(name="append")
+ prop.append(1)
+ self.assertEqual(prop.dtype, DType.int)
+ self.assertEqual(prop.value, [1])
+
+ # Test append with Property dtype.
+ prop = Property(name="append", dtype="int")
+ prop.append(3)
+ self.assertEqual(prop.value, [3])
+
+ # Test append with Property value
+ prop = Property(name="append", value=[1, 2])
+ prop.append(3)
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test append with Property list value
+ prop = Property(name="append", value=[1, 2])
+ prop.append([3])
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test append of empty values, make sure 0 and False are properly handled
+ prop = Property(name="append")
+ prop.append(None)
+ prop.append("")
+ prop.append([])
+ prop.append({})
+ self.assertEqual(prop.value, [])
+
+ prop.append(0)
+ self.assertEqual(prop.value, [0])
+
+ prop.value = None
+ prop.dtype = None
+ prop.append(False)
+ self.assertEqual(prop.value, [False])
+
+ prop = Property(name="append", value=[1, 2])
+ prop.append(None)
+ prop.append("")
+ prop.append([])
+ prop.append({})
+ self.assertEqual(prop.value, [1, 2])
- p.append(5.5, strict=False)
- self.assertEqual(len(p), 1)
+ prop.append(0)
+ self.assertEqual(prop.value, [1, 2, 0])
- self.assertRaises(ValueError, p.extend, [3.14, 6.28])
- p.extend([3.14, 6.28], strict=False)
- self.assertEqual(len(p), 3)
+ # Test fail append with multiple values
+ prop = Property(name="append", value=[1, 2, 3])
+ with self.assertRaises(ValueError):
+ prop.append([4, 5])
+ self.assertEqual(prop.value, [1, 2, 3])
+
+ # Test fail append with mismatching dtype
+ prop = Property(name="append", value=[1, 2], dtype="int")
+ with self.assertRaises(ValueError):
+ prop.append([3.14])
+ with self.assertRaises(ValueError):
+ prop.append([True])
+ with self.assertRaises(ValueError):
+ prop.append(["5.927"])
+ self.assertEqual(prop.value, [1, 2])
+
+ # Test strict flag
+ prop.append(3.14, strict=False)
+ prop.append(True, strict=False)
+ prop.append("5.927", strict=False)
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
+
+ # Make sure non-convertible values still raise an error
+ with self.assertRaises(ValueError):
+ prop.append("invalid")
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
p5 = Property("test", value="a string")
p5.append("Freude")
self.assertEqual(len(p5), 2)
self.assertRaises(ValueError, p5.append, "[a, b, c]")
- p5.extend("[a, b, c]")
- self.assertEqual(len(p5), 5)
- p6 = Property("test", {"name": "Marie", "name": "Johanna"})
- self.assertEqual(len(p6), 1)
+ def test_value_extend(self):
+ prop = Property(name="extend")
- # Test tuple dtype value.
- t = Property(name="Location", value='(39.12; 67.19)', dtype='2-tuple')
- tuple_value = t.value[0] # As the formed tuple is a list of list
- self.assertEqual(tuple_value[0], '39.12')
- self.assertEqual(tuple_value[1], '67.19')
+ # Test extend w/o Property value or dtype.
+ val = [1, 2, 3]
+ prop.extend(val)
+ self.assertEqual(prop.dtype, DType.int)
+ self.assertEqual(prop.value, val)
- # Test invalid tuple length
+ # Extend with single value.
+ prop.extend(4)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ # Extend with list value.
+ prop.extend([5, 6])
+ self.assertEqual(prop.value, [1, 2, 3, 4, 5, 6])
+
+ # Test extend w/o Property value
+ prop = Property(name="extend", dtype="float")
+ prop.extend([1.0, 2.0, 3.0])
+ self.assertEqual(prop.value, [1.0, 2.0, 3.0])
+
+ # Test extend with Property value
+ prop = Property(name="extend", value=10)
+ prop.extend([20, 30, '40'])
+ self.assertEqual(prop.value, [10, 20, 30, 40])
+
+ # Test extend fail with mismatching dtype
with self.assertRaises(ValueError):
- _ = Property(name="Public-Key", value='(5689; 1254; 687)', dtype='2-tuple')
+ prop.extend(['5', 6, 7])
+ with self.assertRaises(ValueError):
+ prop.extend([5, 6, 'a'])
+
+ # Test extend via Property
+ prop = Property(name="extend", value=["a", "b"])
+ ext_prop = Property(name="value extend", value="c")
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c"])
+
+ ext_prop.value = ["d", "e"]
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c", "d", "e"])
+
+ ext_prop = Property(name="value extend", value=[1, 2 ,3])
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, ["a", "b", "c", "d", "e"])
+
+ # Test extend via Property unit check
+ prop = Property(name="extend", value=[1, 2], unit="mV")
+ ext_prop = Property(name="extend", value=[3, 4], unit="mV")
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ ext_prop.unit = "kV"
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ ext_prop.unit = ""
+ with self.assertRaises(ValueError):
+ prop.extend(ext_prop)
+ self.assertEqual(prop.value, [1, 2, 3, 4])
+
+ # Test strict flag
+ prop = Property(name="extend", value=[1, 2], dtype="int")
+ with self.assertRaises(ValueError):
+ prop.extend([3.14, True, "5.927"])
+ self.assertEqual(prop.value, [1, 2])
+
+ prop.extend([3.14, True, "5.927"], strict=False)
+ self.assertEqual(prop.value, [1, 2, 3, 1, 5])
+
+ # Make sure non-convertible values still raise an error
+ with self.assertRaises(ValueError):
+ prop.extend([6, "some text"])
def test_get_set_value(self):
values = [1, 2, 3, 4, 5]
@@ -150,9 +327,6 @@ class TestProperty(unittest.TestCase):
assert(p.dtype == 'string')
assert(p.value == ['7', '20', '1 Dog', 'Seven'])
- def test_name(self):
- pass
-
def test_parent(self):
p = Property("property_section", parent=Section("S"))
self.assertIsInstance(p.parent, BaseSection)
@@ -206,6 +380,12 @@ class TestProperty(unittest.TestCase):
with self.assertRaises(AttributeError):
prop.dtype = "x-tuple"
+ # Test not setting None when a property contains values.
+ prop.value = [1, 2, 3]
+ self.assertIsNotNone(prop.dtype)
+ prop.dtype = None
+ self.assertIsNotNone(prop.dtype)
+
def test_get_path(self):
doc = Document()
sec = Section(name="parent", parent=doc)
@@ -218,14 +398,6 @@ class TestProperty(unittest.TestCase):
prop.parent = sec
self.assertEqual("/%s:%s" % (sec.name, prop.name), prop.get_path())
- def test_value_origin(self):
- p = Property("P")
- self.assertEqual(p.value_origin, None)
- p = Property("P", value_origin="V")
- self.assertEqual(p.value_origin, "V")
- p.value_origin = ""
- self.assertEqual(p.value_origin, None)
-
def test_id(self):
p = Property(name="P")
self.assertIsNotNone(p.id)
diff --git a/test/test_property_integration.py b/test/test_property_integration.py
index 479883f..cf30d59 100644
--- a/test/test_property_integration.py
+++ b/test/test_property_integration.py
@@ -106,6 +106,7 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(jprop.unit, p_unit)
self.assertEqual(jprop.uncertainty, p_uncertainty)
self.assertEqual(jprop.reference, p_ref)
+ self.assertEqual(jprop.definition, p_def)
self.assertEqual(jprop.dependency, p_dep)
self.assertEqual(jprop.dependency_value, p_dep_val)
@@ -116,6 +117,7 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(xprop.unit, p_unit)
self.assertEqual(xprop.uncertainty, p_uncertainty)
self.assertEqual(xprop.reference, p_ref)
+ self.assertEqual(xprop.definition, p_def)
self.assertEqual(xprop.dependency, p_dep)
self.assertEqual(xprop.dependency_value, p_dep_val)
@@ -126,5 +128,6 @@ class TestPropertyIntegration(unittest.TestCase):
self.assertEqual(yprop.unit, p_unit)
self.assertEqual(yprop.uncertainty, p_uncertainty)
self.assertEqual(yprop.reference, p_ref)
+ self.assertEqual(yprop.definition, p_def)
self.assertEqual(yprop.dependency, p_dep)
self.assertEqual(yprop.dependency_value, p_dep_val)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "lxml enum34 pyyaml rdflib",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libxml2-dev libxslt1-dev lib32z1-dev"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
brotlipy==0.7.0
certifi==2021.5.30
cffi @ file:///tmp/build/80754af9/cffi_1625814693874/work
charset-normalizer @ file:///tmp/build/80754af9/charset-normalizer_1630003229654/work
cryptography @ file:///tmp/build/80754af9/cryptography_1635366128178/work
html5lib @ file:///Users/ktietz/demo/mc3/conda-bld/html5lib_1629144453894/work
idna @ file:///tmp/build/80754af9/idna_1637925883363/work
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate @ file:///Users/ktietz/demo/mc3/conda-bld/isodate_1630584690429/work
keepalive @ file:///home/conda/feedstock_root/build_artifacts/keepalive_1635948558527/work
lxml @ file:///tmp/build/80754af9/lxml_1616442911898/work
-e git+https://github.com/G-Node/python-odml.git@c16f9891c4363dfcf907fd7daa076acba4cbe5eb#egg=odML
packaging==21.3
pluggy==1.0.0
py==1.11.0
pycparser @ file:///tmp/build/80754af9/pycparser_1636541352034/work
pyOpenSSL @ file:///opt/conda/conda-bld/pyopenssl_1643788558760/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
PySocks @ file:///tmp/build/80754af9/pysocks_1605305763431/work
pytest==7.0.1
PyYAML==5.4.1
rdflib @ file:///home/conda/feedstock_root/build_artifacts/rdflib_1610581402529/work
requests @ file:///opt/conda/conda-bld/requests_1641824580448/work
six @ file:///tmp/build/80754af9/six_1644875935023/work
SPARQLWrapper @ file:///home/conda/feedstock_root/build_artifacts/sparqlwrapper_1629916978493/work
tomli==1.2.3
typing_extensions==4.1.1
urllib3 @ file:///opt/conda/conda-bld/urllib3_1643638302206/work
webencodings==0.5.1
zipp==3.6.0
| name: python-odml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- brotlipy=0.7.0=py36h27cfd23_1003
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cffi=1.14.6=py36h400218f_0
- charset-normalizer=2.0.4=pyhd3eb1b0_0
- cryptography=35.0.0=py36hd23ed53_0
- enum34=1.1.10=py36h06a4308_0
- html5lib=1.1=pyhd3eb1b0_0
- icu=58.2=he6710b0_3
- idna=3.3=pyhd3eb1b0_0
- isodate=0.6.0=pyhd3eb1b0_1
- keepalive=0.5=pyhd8ed1ab_6
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- libxml2=2.9.14=h74e7548_0
- libxslt=1.1.35=h4e12654_0
- lxml=4.6.3=py36h9120a33_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- pycparser=2.21=pyhd3eb1b0_0
- pyopenssl=22.0.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pysocks=1.7.1=py36h06a4308_0
- python=3.6.13=h12debd9_1
- python_abi=3.6=2_cp36m
- pyyaml=5.4.1=py36h27cfd23_1
- rdflib=5.0.0=py36h5fab9bb_3
- readline=8.2=h5eee18b_0
- requests=2.27.1=pyhd3eb1b0_0
- setuptools=58.0.4=py36h06a4308_0
- six=1.16.0=pyhd3eb1b0_1
- sparqlwrapper=1.8.5=py36h5fab9bb_1006
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- urllib3=1.26.8=pyhd3eb1b0_0
- webencodings=0.5.1=py36_1
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/python-odml
| [
"test/test_property.py::TestProperty::test_simple_attributes",
"test/test_property.py::TestProperty::test_value_append"
] | [] | [
"test/test_property.py::TestProperty::test_bool_conversion",
"test/test_property.py::TestProperty::test_clone",
"test/test_property.py::TestProperty::test_dtype",
"test/test_property.py::TestProperty::test_get_merged_equivalent",
"test/test_property.py::TestProperty::test_get_path",
"test/test_property.py::TestProperty::test_get_set_value",
"test/test_property.py::TestProperty::test_id",
"test/test_property.py::TestProperty::test_merge",
"test/test_property.py::TestProperty::test_new_id",
"test/test_property.py::TestProperty::test_parent",
"test/test_property.py::TestProperty::test_str_to_int_convert",
"test/test_property.py::TestProperty::test_value",
"test/test_property.py::TestProperty::test_value_extend",
"test/test_property_integration.py::TestPropertyIntegration::test_id",
"test/test_property_integration.py::TestPropertyIntegration::test_simple_attributes"
] | [] | BSD 4-Clause "Original" or "Old" License | 2,332 | 3,881 | [
"odml/property.py"
] |
|
tornadoweb__tornado-2338 | 35a538f50e704e348926e1b113bc03328a1da9f2 | 2018-03-31 22:11:44 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | diff --git a/tornado/ioloop.py b/tornado/ioloop.py
index f6ec177b..48700139 100644
--- a/tornado/ioloop.py
+++ b/tornado/ioloop.py
@@ -1213,11 +1213,31 @@ class PeriodicCallback(object):
def _schedule_next(self):
if self._running:
- current_time = self.io_loop.time()
-
- if self._next_timeout <= current_time:
- callback_time_sec = self.callback_time / 1000.0
- self._next_timeout += (math.floor((current_time - self._next_timeout) /
- callback_time_sec) + 1) * callback_time_sec
-
+ self._update_next(self.io_loop.time())
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
+
+ def _update_next(self, current_time):
+ callback_time_sec = self.callback_time / 1000.0
+ if self._next_timeout <= current_time:
+ # The period should be measured from the start of one call
+ # to the start of the next. If one call takes too long,
+ # skip cycles to get back to a multiple of the original
+ # schedule.
+ self._next_timeout += (math.floor((current_time - self._next_timeout) /
+ callback_time_sec) + 1) * callback_time_sec
+ else:
+ # If the clock moved backwards, ensure we advance the next
+ # timeout instead of recomputing the same value again.
+ # This may result in long gaps between callbacks if the
+ # clock jumps backwards by a lot, but the far more common
+ # scenario is a small NTP adjustment that should just be
+ # ignored.
+ #
+ # Note that on some systems if time.time() runs slower
+ # than time.monotonic() (most common on windows), we
+ # effectively experience a small backwards time jump on
+ # every iteration because PeriodicCallback uses
+ # time.time() while asyncio schedules callbacks using
+ # time.monotonic().
+ # https://github.com/tornadoweb/tornado/issues/2333
+ self._next_timeout += callback_time_sec
| ioloop: PeriodicCallback executes too often on windows
## Here is the code:
import math
import logging
from crontab import CronTab
from tornado.ioloop import PeriodicCallback, IOLoop
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class CronTabCallback(PeriodicCallback):
def __init__(self, callback, schedule):
self._callback = callback
self._crontab = CronTab(schedule)
super(CronTabCallback, self).__init__(self.run, self._calc_callbacktime())
def _calc_callbacktime(self, now=None):
return math.ceil(self._crontab.next(now)) * 1000.0
def run(self):
return self._callback()
def _schedule_next(self):
self.callback_time = self._calc_callbacktime()
logging.info('calc ---------------------')
logging.info('delay %s' % self.callback_time)
logging.info('last execute %s' % self._next_timeout)
last = self._next_timeout
super(CronTabCallback, self)._schedule_next()
if last == self._next_timeout:
logging.error('error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
logging.info('current: %s' % self.io_loop.time())
logging.info('calc next: %s' % self._next_timeout)
logging.info('--------------------------\n')
def crontab(schedule):
def decorator(func):
CronTabCallback(func, schedule).start()
return func
return decorator
@crontab('*/1 * * * *')
def run():
logging.info('execute ... \n')
if __name__ == '__main__':
IOLoop.current().start()
## Here is the console log
2018-03-30 11:33:00,311 - asyncio - DEBUG - Using selector: SelectSelector
2018-03-30 11:33:00,316 - root - INFO - calc ---------------------
2018-03-30 11:33:00,316 - root - INFO - delay 60000.0
2018-03-30 11:33:00,316 - root - INFO - last execute 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - current: 1522380780.3169544
2018-03-30 11:33:00,316 - root - INFO - **calc next: 1522380840.3169544**
2018-03-30 11:33:00,316 - root - INFO - --------------------------
**2018-03-30 11:34:00,313** - root - INFO - execute ...
2018-03-30 11:34:00,313 - root - INFO - calc ---------------------
2018-03-30 11:34:00,313 - root - INFO - delay 60000.0
2018-03-30 11:34:00,313 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,313 - root - ERROR - error !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2018-03-30 11:34:00,313 - root - INFO - current: 1522380840.3139544
2018-03-30 11:34:00,313 - root - INFO - calc next: 1522380840.3169544
2018-03-30 11:34:00,313 - root - INFO - --------------------------
2018-03-30 11:34:00,318 - root - INFO - execute ...
2018-03-30 11:34:00,318 - root - INFO - calc ---------------------
2018-03-30 11:34:00,318 - root - INFO - delay 60000.0
2018-03-30 11:34:00,318 - root - INFO - last execute 1522380840.3169544
2018-03-30 11:34:00,318 - root - INFO - current: 1522380840.3189545
2018-03-30 11:34:00,318 - root - INFO - calc next: 1522380900.3169544
2018-03-30 11:34:00,318 - root - INFO - --------------------------
## Environment:
OS: windows 7
Python: python 3.6
Dependent library: crontab 0.22.0
Tornado 4.5.1 python2 (ok) python3(ok)
Tornado 5.0.1 python2 (ok) python3 (linux ok, window has issue) | tornadoweb/tornado | diff --git a/tornado/test/ioloop_test.py b/tornado/test/ioloop_test.py
index 09f71c5d..9f7c1847 100644
--- a/tornado/test/ioloop_test.py
+++ b/tornado/test/ioloop_test.py
@@ -789,6 +789,62 @@ class TestPeriodicCallback(unittest.TestCase):
io_loop.close()
+class TestPeriodicCallbackMath(unittest.TestCase):
+ def simulate_calls(self, pc, durations):
+ """Simulate a series of calls to the PeriodicCallback.
+
+ Pass a list of call durations in seconds (negative values
+ work to simulate clock adjustments during the call, or more or
+ less equivalently, between calls). This method returns the
+ times at which each call would be made.
+ """
+ calls = []
+ now = 1000
+ pc._next_timeout = now
+ for d in durations:
+ pc._update_next(now)
+ calls.append(pc._next_timeout)
+ now = pc._next_timeout + d
+ return calls
+
+ def test_basic(self):
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, [0] * 5),
+ [1010, 1020, 1030, 1040, 1050])
+
+ def test_overrun(self):
+ # If a call runs for too long, we skip entire cycles to get
+ # back on schedule.
+ call_durations = [9, 9, 10, 11, 20, 20, 35, 35, 0, 0, 0]
+ expected = [
+ 1010, 1020, 1030, # first 3 calls on schedule
+ 1050, 1070, # next 2 delayed one cycle
+ 1100, 1130, # next 2 delayed 2 cycles
+ 1170, 1210, # next 2 delayed 3 cycles
+ 1220, 1230, # then back on schedule.
+ ]
+
+ pc = PeriodicCallback(None, 10000)
+ self.assertEqual(self.simulate_calls(pc, call_durations),
+ expected)
+
+ def test_clock_backwards(self):
+ pc = PeriodicCallback(None, 10000)
+ # Backwards jumps are ignored, potentially resulting in a
+ # slightly slow schedule (although we assume that when
+ # time.time() and time.monotonic() are different, time.time()
+ # is getting adjusted by NTP and is therefore more accurate)
+ self.assertEqual(self.simulate_calls(pc, [-2, -1, -3, -2, 0]),
+ [1010, 1020, 1030, 1040, 1050])
+
+ # For big jumps, we should perhaps alter the schedule, but we
+ # don't currently. This trace shows that we run callbacks
+ # every 10s of time.time(), but the first and second calls are
+ # 110s of real time apart because the backwards jump is
+ # ignored.
+ self.assertEqual(self.simulate_calls(pc, [-100, 0, 0]),
+ [1010, 1020, 1030])
+
class TestIOLoopConfiguration(unittest.TestCase):
def run_python(self, *statements):
statements = [
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@35a538f50e704e348926e1b113bc03328a1da9f2#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_basic",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_clock_backwards",
"tornado/test/ioloop_test.py::TestPeriodicCallbackMath::test_overrun"
] | [] | [
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_from_signal_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_return_sequence",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_wakeup_other_thread",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_callback_while_closing",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_add_timeout_timedelta",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_at_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_call_later_return",
"tornado/test/ioloop_test.py::TestIOLoop::test_close_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_future",
"tornado/test/ioloop_test.py::TestIOLoop::test_exception_logging_native_coro",
"tornado/test/ioloop_test.py::TestIOLoop::test_handle_callback_exception",
"tornado/test/ioloop_test.py::TestIOLoop::test_handler_callback_file_object",
"tornado/test/ioloop_test.py::TestIOLoop::test_mixed_fd_fileobj",
"tornado/test/ioloop_test.py::TestIOLoop::test_multiple_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_read_while_writeable",
"tornado/test/ioloop_test.py::TestIOLoop::test_reentrant",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_handler_from_handler",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_after_fire",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_cleanup",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_timeout_from_timeout",
"tornado/test/ioloop_test.py::TestIOLoop::test_remove_without_add",
"tornado/test/ioloop_test.py::TestIOLoop::test_spawn_callback",
"tornado/test/ioloop_test.py::TestIOLoop::test_timeout_with_arguments",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_default_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_force_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrent::test_non_current",
"tornado/test/ioloop_test.py::TestIOLoopCurrentAsync::test_clear_without_current",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallback::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap",
"tornado/test/ioloop_test.py::TestIOLoopAddCallbackFromSignal::test_pre_wrap_with_args",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_stack_context",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_add_future_threads",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_gen",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_run_in_executor_native",
"tornado/test/ioloop_test.py::TestIOLoopFutures::test_set_default_executor",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_async_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_current",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_native_coroutine",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_exception",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_sync_result",
"tornado/test/ioloop_test.py::TestIOLoopRunSync::test_timeout",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_asyncio_main",
"tornado/test/ioloop_test.py::TestIOLoopConfiguration::test_default"
] | [] | Apache License 2.0 | 2,349 | 525 | [
"tornado/ioloop.py"
] |
|
andreroggeri__pynubank-12 | 9e1660516600a94f949259465c371acf7256f5ae | 2018-04-01 22:14:51 | 9e1660516600a94f949259465c371acf7256f5ae | coveralls:
[](https://coveralls.io/builds/16288275)
Coverage remained the same at 100.0% when pulling **19757302b91677ce5e659bbb28fc124dd5ef4ab7 on janjitsu:master** into **9e1660516600a94f949259465c371acf7256f5ae on andreroggeri:master**.
andreroggeri: Muito obrigado @janjitsu 🤑 | diff --git a/pynubank/nubank.py b/pynubank/nubank.py
index 3ba66af..ae31690 100644
--- a/pynubank/nubank.py
+++ b/pynubank/nubank.py
@@ -54,6 +54,7 @@ class Nubank:
self.headers['Authorization'] = 'Bearer {}'.format(data['access_token'])
self.feed_url = data['_links']['events']['href']
self.query_url = data['_links']['ghostflame']['href']
+ self.bills_url = data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
@@ -63,6 +64,10 @@ class Nubank:
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
+ def get_card_bills(self):
+ request = requests.get(self.bills_url, headers=self.headers)
+ return json.loads(request.content.decode('utf-8'))
+
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
| Acessar faturas do cartão
Olá, gostaria de um método para acessar as faturas do cartão! | andreroggeri/pynubank | diff --git a/tests/test_nubank_client.py b/tests/test_nubank_client.py
index 6626e1f..787a858 100644
--- a/tests/test_nubank_client.py
+++ b/tests/test_nubank_client.py
@@ -104,6 +104,132 @@ def events_return():
}
}
[email protected]
+def bills_return():
+ return {
+ "_links": {
+ "future": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/future"
+ },
+ "open": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open"
+ }
+ },
+ "bills": [
+ {
+ "state": "future",
+ "summary": {
+ "adjustments": "0",
+ "close_date": "2018-05-03",
+ "due_date": "2018-05-10",
+ "effective_due_date": "2018-05-10",
+ "expenses": "126.94",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 0,
+ "open_date": "2018-04-03",
+ "paid": 0,
+ "past_balance": 0,
+ "payments": "0",
+ "precise_minimum_payment": "0",
+ "precise_total_balance": "126.94",
+ "previous_bill_balance": "0",
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 12694,
+ "total_credits": "0",
+ "total_cumulative": 12694,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "126.94",
+ "total_payments": "0"
+ }
+ },
+ {
+ "_links": {
+ "self": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open"
+ }
+ },
+ "state": "open",
+ "summary": {
+ "adjustments": "0",
+ "close_date": "2018-04-03",
+ "due_date": "2018-04-10",
+ "effective_due_date": "2018-04-10",
+ "expenses": "303.36",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 0,
+ "open_date": "2018-03-03",
+ "paid": 0,
+ "past_balance": 0,
+ "payments": "-285.15",
+ "precise_minimum_payment": "0",
+ "precise_total_balance": "303.362041645013",
+ "previous_bill_balance": "285.152041645013",
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 30336,
+ "total_credits": "0",
+ "total_cumulative": 30336,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "303.36",
+ "total_payments": "-285.15"
+ }
+ },
+ {
+ "_links": {
+ "self": {
+ "href": "https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz"
+ }
+ },
+ "href": "nuapp://bill/abcde-fghi-jklmn-opqrst-uvxz",
+ "id": "abcde-fghi-jklmn-opqrst-uvxz",
+ "state": "overdue",
+ "summary": {
+ "adjustments": "-63.99106066",
+ "close_date": "2018-03-03",
+ "due_date": "2018-03-10",
+ "effective_due_date": "2018-03-12",
+ "expenses": "364.14",
+ "fees": "0",
+ "interest": 0,
+ "interest_charge": "0",
+ "interest_rate": "0.1375",
+ "interest_reversal": "0",
+ "international_tax": "0",
+ "minimum_payment": 8003,
+ "open_date": "2018-02-03",
+ "paid": 28515,
+ "past_balance": -1500,
+ "payments": "-960.47",
+ "precise_minimum_payment": "480.02544320601300",
+ "precise_total_balance": "285.152041645013",
+ "previous_bill_balance": "945.473102305013",
+ "remaining_minimum_payment": 0,
+ "tax": "0",
+ "total_accrued": "0",
+ "total_balance": 28515,
+ "total_credits": "-64.18",
+ "total_cumulative": 30015,
+ "total_financed": "0",
+ "total_international": "0",
+ "total_national": "364.32893934",
+ "total_payments": "-960.47"
+ }
+ },
+ ]
+ }
@pytest.fixture
def account_balance_return():
@@ -192,6 +318,55 @@ def test_get_card_feed(monkeypatch, authentication_return, events_return):
assert events[0]['href'] == 'nuapp://transaction/abcde-fghi-jklmn-opqrst-uvxz'
assert events[0]['_links']['self']['href'] == 'https://prod-s0-webapp-proxy.nubank.com.br/api/proxy/_links_123'
+def test_get_card_bills(monkeypatch, authentication_return, bills_return):
+ response = create_fake_response(authentication_return)
+ monkeypatch.setattr('requests.post', MagicMock(return_value=response))
+ nubank_client = Nubank('12345678909', '12345678')
+
+ response = create_fake_response(bills_return)
+ monkeypatch.setattr('requests.get', MagicMock(return_value=response))
+
+ bills_response = nubank_client.get_card_bills()
+ assert bills_response['_links']['future']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/future'
+ assert bills_response['_links']['open']['href'] == 'https://prod-s0-billing.nubank.com.br/api/accounts/abcde-fghi-jklmn-opqrst-uvxz/bills/open'
+
+ bills = bills_response['bills']
+ assert len(bills) == 3
+ assert bills[2]['_links']['self']['href'] == "https://prod-s0-billing.nubank.com.br/api/bills/abcde-fghi-jklmn-opqrst-uvxz"
+ assert bills[2]['href'] == 'nuapp://bill/abcde-fghi-jklmn-opqrst-uvxz'
+ assert bills[2]['id'] == 'abcde-fghi-jklmn-opqrst-uvxz'
+ assert bills[2]['state'] == 'overdue'
+
+ summary = bills[2]['summary']
+ assert summary["adjustments"] == "-63.99106066"
+ assert summary["close_date"] == "2018-03-03"
+ assert summary["due_date"] == "2018-03-10"
+ assert summary["effective_due_date"] == "2018-03-12"
+ assert summary["expenses"] == "364.14"
+ assert summary["fees"] == "0"
+ assert summary["interest"] == 0
+ assert summary["interest_charge"] == "0"
+ assert summary["interest_rate"] == "0.1375"
+ assert summary["interest_reversal"] == "0"
+ assert summary["international_tax"] == "0"
+ assert summary["minimum_payment"] == 8003
+ assert summary["open_date"] == "2018-02-03"
+ assert summary["paid"] == 28515
+ assert summary["past_balance"] == -1500
+ assert summary["payments"] == "-960.47"
+ assert summary["precise_minimum_payment"] == "480.02544320601300"
+ assert summary["precise_total_balance"] == "285.152041645013"
+ assert summary["previous_bill_balance"] == "945.473102305013"
+ assert summary["remaining_minimum_payment"] == 0
+ assert summary["tax"] == "0"
+ assert summary["total_accrued"] == "0"
+ assert summary["total_balance"] == 28515
+ assert summary["total_credits"] == "-64.18"
+ assert summary["total_cumulative"] == 30015
+ assert summary["total_financed"] == "0"
+ assert summary["total_international"] == "0"
+ assert summary["total_national"] == "364.32893934"
+ assert summary["total_payments"] == "-960.47"
def test_get_card_statements(monkeypatch, authentication_return, events_return):
response = create_fake_response(authentication_return)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
chardet==3.0.4
exceptiongroup==1.2.2
idna==2.5
iniconfig==2.1.0
nose==1.3.7
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/andreroggeri/pynubank.git@9e1660516600a94f949259465c371acf7256f5ae#egg=pynubank
pytest==8.3.5
requests==2.18.1
tomli==2.2.1
urllib3==1.21.1
| name: pynubank
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- chardet==3.0.4
- exceptiongroup==1.2.2
- idna==2.5
- iniconfig==2.1.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.18.1
- tomli==2.2.1
- urllib3==1.21.1
prefix: /opt/conda/envs/pynubank
| [
"tests/test_nubank_client.py::test_get_card_bills"
] | [] | [
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[100]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[101]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[102]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[103]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[201]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[202]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[203]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[204]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[205]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[206]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[207]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[208]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[226]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[300]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[301]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[302]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[303]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[304]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[305]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[306]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[307]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[308]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[400]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[401]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[402]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[403]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[404]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[405]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[406]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[407]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[408]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[409]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[410]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[411]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[412]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[413]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[414]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[415]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[416]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[417]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[418]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[420]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[421]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[422]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[423]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[424]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[426]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[428]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[429]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[431]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[440]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[444]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[449]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[450]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[451]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[495]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[496]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[497]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[498]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[499]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[500]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[501]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[502]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[503]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[504]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[505]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[506]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[507]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[508]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[509]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[510]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[511]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[520]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[521]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[522]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[523]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[524]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[525]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[526]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[527]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[530]",
"tests/test_nubank_client.py::test_authentication_failure_raise_exception[598]",
"tests/test_nubank_client.py::test_authentication_succeeds",
"tests/test_nubank_client.py::test_get_card_feed",
"tests/test_nubank_client.py::test_get_card_statements",
"tests/test_nubank_client.py::test_get_account_balance",
"tests/test_nubank_client.py::test_get_account_feed",
"tests/test_nubank_client.py::test_get_account_statements",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[100]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[101]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[102]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[103]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[201]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[202]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[203]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[204]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[205]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[206]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[207]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[208]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[226]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[300]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[301]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[302]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[303]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[304]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[305]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[306]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[307]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[308]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[400]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[401]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[402]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[403]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[404]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[405]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[406]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[407]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[408]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[409]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[410]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[411]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[412]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[413]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[414]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[415]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[416]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[417]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[418]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[420]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[421]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[422]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[423]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[424]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[426]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[428]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[429]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[431]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[440]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[444]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[449]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[450]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[451]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[495]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[496]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[497]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[498]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[499]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[500]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[501]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[502]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[503]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[504]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[505]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[506]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[507]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[508]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[509]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[510]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[511]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[520]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[521]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[522]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[523]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[524]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[525]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[526]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[527]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[530]",
"tests/test_nubank_client.py::test_grapql_query_raises_exeption[598]"
] | [] | MIT License | 2,351 | 283 | [
"pynubank/nubank.py"
] |
conan-io__conan-2705 | e1e3cce13e69576f562296a1e6d64352e8e84ccd | 2018-04-02 15:07:40 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | diff --git a/conans/__init__.py b/conans/__init__.py
index 97c78e433..2333a851a 100644
--- a/conans/__init__.py
+++ b/conans/__init__.py
@@ -16,5 +16,5 @@ COMPLEX_SEARCH_CAPABILITY = "complex_search"
SERVER_CAPABILITIES = [COMPLEX_SEARCH_CAPABILITY, ]
-__version__ = '1.2.0'
+__version__ = '1.2.1'
diff --git a/conans/client/build/cppstd_flags.py b/conans/client/build/cppstd_flags.py
index e5ca936c3..5435d63e6 100644
--- a/conans/client/build/cppstd_flags.py
+++ b/conans/client/build/cppstd_flags.py
@@ -26,7 +26,7 @@ def cppstd_flag(compiler, compiler_version, cppstd):
def cppstd_default(compiler, compiler_version):
default = {"gcc": _gcc_cppstd_default(compiler_version),
"clang": _clang_cppstd_default(compiler_version),
- "apple-clang": "gnu98",
+ "apple-clang": "gnu98", # Confirmed in apple-clang 9.1 with a simple "auto i=1;"
"Visual Studio": _visual_cppstd_default(compiler_version)}.get(str(compiler), None)
return default
@@ -85,6 +85,11 @@ def _cppstd_apple_clang(clang_version, cppstd):
v17 = "c++1z"
vgnu17 = "gnu++1z"
+ if Version(clang_version) >= "9.1":
+ # Not confirmed that it didn't work before 9.1 but 1z is still valid, so we are ok
+ v17 = "c++17"
+ vgnu17 = "gnu++17"
+
flag = {"98": v98, "gnu98": vgnu98,
"11": v11, "gnu11": vgnu11,
"14": v14, "gnu14": vgnu14,
diff --git a/conans/client/conf/__init__.py b/conans/client/conf/__init__.py
index 267416ced..96dd58553 100644
--- a/conans/client/conf/__init__.py
+++ b/conans/client/conf/__init__.py
@@ -65,7 +65,7 @@ compiler:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0", "6.0"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
- version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0"]
+ version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0", "9.1"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release]
diff --git a/conans/client/migrations.py b/conans/client/migrations.py
index ef0ba6b31..27a86af1f 100644
--- a/conans/client/migrations.py
+++ b/conans/client/migrations.py
@@ -41,7 +41,7 @@ class ClientMigrator(Migrator):
# VERSION 0.1
if old_version is None:
return
- if old_version < Version("1.1.0-dev"):
+ if old_version < Version("1.2.1"):
old_settings = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
@@ -82,9 +82,9 @@ compiler:
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
- "5", "5.1", "5.2", "5.3", "5.4",
+ "5", "5.1", "5.2", "5.3", "5.4", "5.5",
"6", "6.1", "6.2", "6.3", "6.4",
- "7", "7.1", "7.2"]
+ "7", "7.1", "7.2", "7.3"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
@@ -93,13 +93,14 @@ compiler:
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp, v140, v140_xp, v140_clang_c2, LLVM-vs2014, LLVM-vs2014_xp, v141, v141_xp, v141_clang_c2]
clang:
- version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0"]
+ version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0", "6.0"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release]
+cppstd: [None, 98, gnu98, 11, gnu11, 14, gnu14, 17, gnu17]
"""
self._update_settings_yml(old_settings)
| Apple-clang 9.1 not supported
Please, add apple-clang 9.1 to supported platform. I had problem with compile missing libraries. I fixed it with manually edited ```.conan/settings.yml```
My CMake output:
```
-- The CXX compiler identification is AppleClang 9.1.0.9020039
```
Clang version:
```sh
$ clang++ -v
Apple LLVM version 9.1.0 (clang-902.0.39.1)
Target: x86_64-apple-darwin17.5.0
Thread model: posix
InstalledDir: /Library/Developer/CommandLineTools/usr/bin
```
Conan version:
```
$ conan -v
Conan version 1.2.0
``` | conan-io/conan | diff --git a/conans/test/build_helpers/cpp_std_flags_test.py b/conans/test/build_helpers/cpp_std_flags_test.py
index b110bb3f4..7e7c1c5e4 100644
--- a/conans/test/build_helpers/cpp_std_flags_test.py
+++ b/conans/test/build_helpers/cpp_std_flags_test.py
@@ -136,6 +136,10 @@ class CompilerFlagsTest(unittest.TestCase):
self.assertEquals(cppstd_flag("apple-clang", "9", "14"), '-std=c++14')
self.assertEquals(cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "11"), '-std=c++11')
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "14"), '-std=c++14')
+ self.assertEquals(cppstd_flag("apple-clang", "9.1", "17"), "-std=c++17")
+
def test_apple_clang_cppstd_defaults(self):
self.assertEquals(cppstd_default("apple-clang", "2"), "gnu98")
self.assertEquals(cppstd_default("apple-clang", "3"), "gnu98")
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 4
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_dev.txt",
"conans/requirements_server.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@e1e3cce13e69576f562296a1e6d64352e8e84ccd#egg=conan
coverage==4.2
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_flags"
] | [] | [
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_apple_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_clang_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_gcc_cppstd_flags",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_defaults",
"conans/test/build_helpers/cpp_std_flags_test.py::CompilerFlagsTest::test_visual_cppstd_flags"
] | [] | MIT License | 2,352 | 1,612 | [
"conans/__init__.py",
"conans/client/build/cppstd_flags.py",
"conans/client/conf/__init__.py",
"conans/client/migrations.py"
] |
|
conan-io__conan-2708 | 777f846df4cabe366ddcb88e39f6c7cd8970d7e1 | 2018-04-03 11:42:07 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | diff --git a/conans/client/userio.py b/conans/client/userio.py
index b42d8e39d..c64d67ff9 100644
--- a/conans/client/userio.py
+++ b/conans/client/userio.py
@@ -40,6 +40,7 @@ class UserIO(object):
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
+ self._raise_if_non_interactive()
user_input = ''
while not username:
try:
| Non interactive mode also prevents prompt from `conan user`
The new non interactive mode also prevents the prompt from `conan user <name> -p`. This is not what is specified in the documentation. (And also not what was originally implemented.)
I hope the prompt can be re-enabled for this particular case. If not, the documentation should be changed.
Oh, and a purely cosmetic remark: the non interactive error only appears after the prompt has been displayed.
Using Conan 1.2 on Windows 7.
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
| conan-io/conan | diff --git a/conans/test/command/user_test.py b/conans/test/command/user_test.py
index 86d7d15c4..c059b50e2 100644
--- a/conans/test/command/user_test.py
+++ b/conans/test/command/user_test.py
@@ -158,5 +158,10 @@ class ConanLib(ConanFile):
error = conan.run('user -p -r default lasote', ignore_error=True)
self.assertTrue(error)
self.assertIn('ERROR: Conan interactive mode disabled', conan.user_io.out)
+ self.assertNotIn("Please enter a password for \"lasote\" account:", conan.out)
conan.run("user")
self.assertIn("Current 'default' user: None", conan.user_io.out)
+ error = conan.run("user -p", ignore_error=True)
+ self.assertTrue(error)
+ self.assertIn('ERROR: Conan interactive mode disabled', conan.out)
+ self.assertNotIn("Remote 'default' username:", conan.out)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@777f846df4cabe366ddcb88e39f6c7cd8970d7e1#egg=conan
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password_login_prompt_disabled"
] | [] | [
"conans/test/command/user_test.py::UserTest::test_clean",
"conans/test/command/user_test.py::UserTest::test_command_interactive_only",
"conans/test/command/user_test.py::UserTest::test_command_user_list",
"conans/test/command/user_test.py::UserTest::test_command_user_no_remotes",
"conans/test/command/user_test.py::UserTest::test_command_user_with_interactive_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password",
"conans/test/command/user_test.py::UserTest::test_command_user_with_password_spaces",
"conans/test/command/user_test.py::UserTest::test_with_no_user",
"conans/test/command/user_test.py::UserTest::test_with_remote_no_connect"
] | [] | MIT License | 2,354 | 139 | [
"conans/client/userio.py"
] |
|
nipy__nipype-2527 | e446466290b9ccba5d5aa589971c97e744d9267b | 2018-04-03 13:30:50 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py
index 9f306c7b8..cb01ae4a2 100644
--- a/nipype/interfaces/afni/utils.py
+++ b/nipype/interfaces/afni/utils.py
@@ -564,8 +564,11 @@ class CatMatvecInputSpec(AFNICommandInputSpec):
argstr="%s",
position=-2)
out_file = File(
- desc="File to write concattenated matvecs to",
argstr=" > %s",
+ name_template='%s_cat.aff12.1D',
+ name_source='in_file',
+ keep_extension=False,
+ desc="File to write concattenated matvecs to",
position=-1,
mandatory=True)
matrix = traits.Bool(
| AFNI CatMatvec: Caching fails and output file undefined
### Summary
AFNI CatMatvec interface caching fails and output file is undefined
### How to replicate the behavior
```Python
from nipype.interfaces import afni
from nipype.caching import Memory
memory = Memory('/tmp/test_catmatvec/')
catmatvec = memory.cache(afni.CatMatvec)
out_catmatvec = catmatvec(in_file=[('/tmp/test_caching/allineate_affine.aff12.1D', 'ONELINE')], out_file='/tmp/test_catmatvec2.aff12.1D')
print(out_catmatvec.outputs)
```
gives `out_file = <undefined>`. Moreover, when launching the same code twice the computations are restarted
### Platform details:
{'nibabel_version': '2.2.1', 'sys_executable': '/home/salma/anaconda2/bin/python', 'networkx_version': '2.0', 'numpy_version': '1.13.3', 'sys_platform': 'linux2', 'sys_version': '2.7.14 |Anaconda custom (64-bit)| (default, Oct 16 2017, 17:29:19) \n[GCC 7.2.0]', 'commit_source': 'repository', 'commit_hash': 'e446466', 'pkg_path': '/home/salma/CODE/nipype/nipype', 'nipype_version': '1.0.3-dev+ge446466', 'traits_version': '4.6.0', 'scipy_version': '1.0.0'}
1.0.3-dev+ge446466
| nipy/nipype | diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
index dc1c981bb..df0ac34e0 100644
--- a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
+++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py
@@ -38,7 +38,10 @@ def test_CatMatvec_inputs():
),
out_file=dict(
argstr=' > %s',
+ keep_extension=False,
mandatory=True,
+ name_source='in_file',
+ name_template='%s_cat.aff12.1D',
position=-1,
),
outputtype=dict(),
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@e446466290b9ccba5d5aa589971c97e744d9267b#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/interfaces/afni/tests/test_auto_CatMatvec.py::test_CatMatvec_inputs"
] | [] | [
"nipype/interfaces/afni/tests/test_auto_CatMatvec.py::test_CatMatvec_outputs"
] | [] | Apache License 2.0 | 2,355 | 199 | [
"nipype/interfaces/afni/utils.py"
] |
|
cevoaustralia__aws-google-auth-71 | 390e181516b02baf55bdf67296fe3437e7c8904a | 2018-04-03 21:57:06 | d473d67b0772700942f5bb0db3522af0a1005453 | diff --git a/aws_google_auth/__init__.py b/aws_google_auth/__init__.py
index 0ec8126..2163356 100644
--- a/aws_google_auth/__init__.py
+++ b/aws_google_auth/__init__.py
@@ -51,7 +51,7 @@ def exit_if_unsupported_python():
sys.exit(1)
-def main(cli_args):
+def cli(cli_args):
try:
exit_if_unsupported_python()
@@ -201,6 +201,10 @@ def process_auth(args, config):
amazon_client.print_export_line()
-if __name__ == '__main__':
+def main():
cli_args = sys.argv[1:]
- main(cli_args)
+ cli(cli_args)
+
+
+if __name__ == '__main__':
+ main()
| main() takes exactly 1 argument (0 given)
I'm getting the following error, introduced in 48d22e4d62bb6e216cd8932739ea0be4735e2588 (Determined by `git bisect`). I don't have the time to dig right now, but I will later.
```
$ aws-google-auth --help
Traceback (most recent call last):
File "/Users/mide/virtualenv/aws-google-auth-dev/bin/aws-google-auth", line 11, in <module>
load_entry_point('aws-google-auth', 'console_scripts', 'aws-google-auth')()
TypeError: main() takes exactly 1 argument (0 given)
``` | cevoaustralia/aws-google-auth | diff --git a/aws_google_auth/tests/test_init.py b/aws_google_auth/tests/test_init.py
index 9a298a8..cd818d2 100644
--- a/aws_google_auth/tests/test_init.py
+++ b/aws_google_auth/tests/test_init.py
@@ -11,6 +11,20 @@ class TestInit(unittest.TestCase):
def setUp(self):
pass
+ @patch('aws_google_auth.cli', spec=True)
+ def test_main_method_has_no_parameters(self, mock_cli):
+ """
+ This is the entrypoint for the cli tool, and should require no parameters
+
+ :param mock_cli:
+ :return:
+ """
+
+ # Function under test
+ aws_google_auth.main()
+
+ self.assertTrue(mock_cli.called)
+
@patch('aws_google_auth.exit_if_unsupported_python', spec=True)
@patch('aws_google_auth.resolve_config', spec=True)
@patch('aws_google_auth.process_auth', spec=True)
@@ -22,7 +36,7 @@ class TestInit(unittest.TestCase):
aws_google_auth.resolve_config.return_value = mock_config
# Function under test
- aws_google_auth.main([])
+ aws_google_auth.cli([])
self.assertTrue(exit_if_unsupported_python.called)
self.assertTrue(resolve_config.called)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/cevoaustralia/aws-google-auth.git@390e181516b02baf55bdf67296fe3437e7c8904a#egg=aws_google_auth
beautifulsoup4==4.13.3
boto3==1.37.23
botocore==1.37.23
certifi==2025.1.31
charset-normalizer==3.4.1
configparser==7.2.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
jmespath==1.0.1
lxml==5.3.1
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
s3transfer==0.11.4
six==1.17.0
soupsieve==2.6
tabulate==0.9.0
tomli==2.2.1
typing_extensions==4.13.0
tzlocal==5.3.1
urllib3==1.26.20
| name: aws-google-auth
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- beautifulsoup4==4.13.3
- boto3==1.37.23
- botocore==1.37.23
- certifi==2025.1.31
- charset-normalizer==3.4.1
- configparser==7.2.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- jmespath==1.0.1
- lxml==5.3.1
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- s3transfer==0.11.4
- six==1.17.0
- soupsieve==2.6
- tabulate==0.9.0
- tomli==2.2.1
- typing-extensions==4.13.0
- tzlocal==5.3.1
- urllib3==1.26.20
prefix: /opt/conda/envs/aws-google-auth
| [
"aws_google_auth/tests/test_init.py::TestInit::test_main_method_chaining",
"aws_google_auth/tests/test_init.py::TestInit::test_main_method_has_no_parameters"
] | [] | [
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_dont_resolve_alias",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_specified_role",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_standard",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_with_profile",
"aws_google_auth/tests/test_init.py::TestInit::test_process_auth_with_saml_cache"
] | [] | MIT License | 2,358 | 190 | [
"aws_google_auth/__init__.py"
] |
|
TheFriendlyCoder__friendlypins-2 | 3ebf4892111351fc82c38a2b1a9f81ab099294a9 | 2018-04-04 02:36:59 | 3ebf4892111351fc82c38a2b1a9f81ab099294a9 | diff --git a/setup.py b/setup.py
index c322c9e..7827c73 100755
--- a/setup.py
+++ b/setup.py
@@ -7,20 +7,18 @@ from setuptools import setup, find_packages
# project specific parameters
PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
- #'requests[security]>=2.0.1',
- 'requests',
- 'six',
- 'tqdm']
+ 'requests<3.0.0,>=2.0.0',
+ 'six<2.0.0,>=1.0.0',]
PROJECT_DEV_DEPENDENCIES = [
- 'wheel',
- 'twine',
- 'pytest',
- 'pytest-cov',
- 'mock',
- 'radon',
- 'pylint',
- 'sphinx>=1.2.3',
- 'tox']
+ 'wheel<1.0.0',
+ 'twine<2.0.0',
+ 'pytest>=3.5.0<4.0.0',
+ 'pytest-cov>=2.5.0<3.0.0',
+ 'mock>=2.0.0<3.0.0',
+ 'radon>=2.2.0<3.0.0',
+ 'pylint>=1.8.0<2.0.0',
+ 'sphinx>=1.2.3<2.0.0',
+ 'tox>=3.0.0<4.0.0']
PROJECT_DESCRIPTION = 'Python wrapper around the Pinterest developer APIs'
PROJECT_KEYWORDS = 'pinterest api wrapper library'
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index 8b28950..c9540e1 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -1,10 +1,25 @@
"""Primary entry point for the Friendly Pinterest library"""
from __future__ import print_function
+import logging
+import requests
+from friendlypins.user import User
class API(object): # pylint: disable=too-few-public-methods
"""High level abstraction for the core Pinterest API"""
- def __init__(self):
- self.name = "hello"
+
+ # URL of the root namespace for the Pinterest API
+ _root_url = 'https://api.pinterest.com/v1'
+
+ def __init__(self, personal_access_token):
+ """Constructor
+
+ :param str personal_access_token:
+ API authentication token used for secure access to a users'
+ Pinterest data
+ """
+
+ self._log = logging.getLogger(__name__)
+ self._token = personal_access_token
def get_user(self, username=None):
"""Gets all primitives associated with a particular Pinterst user
@@ -15,11 +30,18 @@ class API(object): # pylint: disable=too-few-public-methods
returns: Pinterest user with the given name
rtype: :class:`friendly_pinterest.user.User`
"""
- print(self.name)
if username:
- return None
- return None
+ raise NotImplementedError(
+ "Querying arbitrary Pinerest users is not yet supported.")
+ else:
+ temp_url = "{0}/me".format(self._root_url)
+ temp_url += "?access_token={0}".format(self._token)
+ response = requests.get(temp_url)
+ response.raise_for_status()
+ assert 'data' in response.json()
+ return User(response.json()['data'])
+# pylint: disable-all
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
new file mode 100644
index 0000000..02dd4e7
--- /dev/null
+++ b/src/friendlypins/user.py
@@ -0,0 +1,44 @@
+"""Interfaces for interacting with Pinterest users"""
+import logging
+
+class User(object):
+ """Abstraction around a Pinterest user and their associated data"""
+
+ def __init__(self, data):
+ """Constructor
+
+ :param dict data: JSON data parsed from the API
+ """
+ self._log = logging.getLogger(__name__)
+ self._data = data
+
+ @property
+ def unique_id(self):
+ """Gets the internal unique ID associated with the user
+ :rtype: :class:`str`
+ """
+ return self._data['id']
+
+ @property
+ def first_name(self):
+ """Gets the first name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['first_name']
+
+ @property
+ def last_name(self):
+ """Gets the last name of the user
+ :rtype: :class:`str`
+ """
+ return self._data['last_name']
+
+ @property
+ def url(self):
+ """Gets the URL of the users profile
+ :rtype: :class:`str`
+ """
+ return self._data['url']
+
+if __name__ == "__main__":
+ pass
| Add support for basic connectivity
Implement basic init method for API class, allowing connections to be made to a Pinterest account by specifying a user's API token, and perform a simple query to get information about the user who's token we're authenticating with. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_api.py b/unit_tests/test_api.py
index 2526048..67ada2e 100644
--- a/unit_tests/test_api.py
+++ b/unit_tests/test_api.py
@@ -1,12 +1,32 @@
import pytest
+import mock
from friendlypins.api import API
-def test_constructor():
- obj = API()
-
def test_get_user():
- obj = API()
- obj.get_user()
+ obj = API('abcd1234')
+ expected_url = 'https://www.pinterest.com/MyUserName/'
+ expected_firstname = "John"
+ expected_lastname = "Doe"
+ expected_id = "12345678"
+ expected_data = {
+ 'data': {
+ 'url': expected_url,
+ 'first_name': expected_firstname,
+ 'last_name': expected_lastname,
+ 'id': expected_id
+ }
+ }
+ with mock.patch("friendlypins.api.requests") as mock_requests:
+ mock_response = mock.MagicMock()
+ mock_response.json.return_value = expected_data
+ mock_requests.get.return_value = mock_response
+ result = obj.get_user()
+
+ assert expected_url == result.url
+ assert expected_firstname == result.first_name
+ assert expected_lastname == result.last_name
+ assert expected_id == result.unique_id
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
cryptography==40.0.2
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@3ebf4892111351fc82c38a2b1a9f81ab099294a9#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
jeepney==0.7.1
Jinja2==3.0.3
keyring==23.4.1
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
twine==3.8.0
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- babel==2.11.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- cryptography==40.0.2
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jeepney==0.7.1
- jinja2==3.0.3
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pycparser==2.21
- pygments==2.14.0
- pylint==2.13.9
- pytest-cov==4.0.0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- twine==3.8.0
- typed-ast==1.5.5
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.16.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_api.py::test_get_user"
] | [] | [] | [] | Apache License 2.0 | 2,359 | 1,211 | [
"setup.py",
"src/friendlypins/api.py"
] |
|
conan-io__conan-2728 | 35c00decd88eb8dfb87871209907ddce4f7ce170 | 2018-04-09 15:05:36 | 419beea8c76ebf9271c8612339bdb0e5aa376306 | lasote: Pushed again, please check | diff --git a/conans/client/action_recorder.py b/conans/client/action_recorder.py
index bf573d9cb..d771bf075 100644
--- a/conans/client/action_recorder.py
+++ b/conans/client/action_recorder.py
@@ -7,6 +7,8 @@ from datetime import datetime
from collections import namedtuple, OrderedDict
# Install actions
+from conans.model.ref import ConanFileReference, PackageReference
+
INSTALL_CACHE = 0
INSTALL_DOWNLOADED = 1
INSTALL_BUILT = 2
@@ -32,8 +34,11 @@ class ActionRecorder(object):
def __init__(self):
self._inst_recipes_actions = OrderedDict()
self._inst_packages_actions = OrderedDict()
+ self._inst_recipes_develop = set() # Recipes being created (to set dependency=False)
# ###### INSTALL METHODS ############
+ def add_recipe_being_developed(self, reference):
+ self._inst_recipes_develop.add(reference)
def _add_recipe_action(self, reference, action):
if reference not in self._inst_recipes_actions:
@@ -90,6 +95,12 @@ class ActionRecorder(object):
ret.append((_package_ref, _package_action))
return ret
+ def in_development_recipe(self, reference):
+ return reference in self._inst_recipes_develop
+
+ def get_info(self):
+ return self.get_install_info()
+
def get_install_info(self):
ret = {"error": self.install_errored,
"installed": []}
@@ -98,11 +109,15 @@ class ActionRecorder(object):
error = None if the_action.type != INSTALL_ERROR else the_action.doc
doc = {"id": str(the_ref),
"downloaded": the_action.type == INSTALL_DOWNLOADED,
- "built": the_action.type == INSTALL_BUILT,
"cache": the_action.type == INSTALL_CACHE,
"error": error,
"remote": the_action.doc.get("remote", None),
"time": the_action.time}
+ if isinstance(the_ref, ConanFileReference):
+ doc["dependency"] = not self.in_development_recipe(the_ref)
+ else:
+ doc["built"] = the_action.type == INSTALL_BUILT
+
if doc["remote"] is None and error:
doc["remote"] = error.get("remote", None)
return doc
@@ -111,7 +126,6 @@ class ActionRecorder(object):
# Could be a download and then an access to cache, we want the first one
action = actions[0]
recipe_doc = get_doc_for_ref(ref, action)
- del recipe_doc["built"] # Avoid confusions
packages = self._get_installed_packages(ref)
tmp = {"recipe": recipe_doc,
"packages": []}
diff --git a/conans/client/command.py b/conans/client/command.py
index 19513161d..f805caf51 100644
--- a/conans/client/command.py
+++ b/conans/client/command.py
@@ -220,17 +220,21 @@ class Command(object):
cwd = os.getcwd()
+ info = None
try:
- self._conan.create(args.path, name, version, user, channel,
- args.profile, args.settings, args.options,
- args.env, args.test_folder, args.not_export,
- args.build, args.keep_source, args.keep_build, args.verify,
- args.manifests, args.manifests_interactive,
- args.remote, args.update,
- test_build_folder=args.test_build_folder)
+ info = self._conan.create(args.path, name, version, user, channel,
+ args.profile, args.settings, args.options,
+ args.env, args.test_folder, args.not_export,
+ args.build, args.keep_source, args.keep_build, args.verify,
+ args.manifests, args.manifests_interactive,
+ args.remote, args.update,
+ test_build_folder=args.test_build_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def download(self, *args):
"""Downloads recipe and binaries to the local cache, without using settings. It works
@@ -289,34 +293,38 @@ class Command(object):
args = parser.parse_args(*args)
cwd = os.getcwd()
+ info = None
try:
try:
reference = ConanFileReference.loads(args.path_or_reference)
except ConanException:
- self._conan.install(path=args.path_or_reference,
- settings=args.settings, options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update, generators=args.generator,
- no_imports=args.no_imports,
- install_folder=args.install_folder)
+ info = self._conan.install(path=args.path_or_reference,
+ settings=args.settings, options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update, generators=args.generator,
+ no_imports=args.no_imports,
+ install_folder=args.install_folder)
else:
- self._conan.install_reference(reference, settings=args.settings,
- options=args.options,
- env=args.env,
- remote=args.remote,
- verify=args.verify, manifests=args.manifests,
- manifests_interactive=args.manifests_interactive,
- build=args.build, profile_name=args.profile,
- update=args.update,
- generators=args.generator,
- install_folder=args.install_folder)
+ info = self._conan.install_reference(reference, settings=args.settings,
+ options=args.options,
+ env=args.env,
+ remote=args.remote,
+ verify=args.verify, manifests=args.manifests,
+ manifests_interactive=args.manifests_interactive,
+ build=args.build, profile_name=args.profile,
+ update=args.update,
+ generators=args.generator,
+ install_folder=args.install_folder)
+ except ConanException as exc:
+ info = exc.info
+ raise
finally:
- if args.json:
- self._outputer.json_install(self._conan.recorder.get_install_info(), args.json, cwd)
+ if args.json and info:
+ self._outputer.json_install(info, args.json, cwd)
def config(self, *args):
"""Manages Conan configuration. Edits the conan.conf or installs config files.
diff --git a/conans/client/conan_api.py b/conans/client/conan_api.py
index 98ee25d5a..dd50689b2 100644
--- a/conans/client/conan_api.py
+++ b/conans/client/conan_api.py
@@ -65,15 +65,21 @@ def api_method(f):
the_self = args[0]
try:
log_command(f.__name__, kwargs)
+ the_self._init_manager()
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
- return f(*args, **kwargs)
+ ret = f(*args, **kwargs)
+ if ret is None: # FIXME: Probably each method should manage its return
+ return the_self._recorder.get_info()
+ return ret
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except:
pass
+ if isinstance(exc, ConanException):
+ exc.info = the_self._recorder.get_info()
raise
return wrapper
@@ -207,13 +213,22 @@ class ConanAPIV1(object):
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
- self.recorder = ActionRecorder()
+ self._search_manager = search_manager
+ self._settings_preprocessor = _settings_preprocessor
self._registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
- self._manager = ConanManager(client_cache, user_io, runner, remote_manager, search_manager,
- _settings_preprocessor, self.recorder, self._registry)
+ self._recorder = None
+ self._manager = None
+
if not interactive:
self._user_io.disable_input()
+ def _init_manager(self):
+ """Every api call gets a new recorder and new manager"""
+ self._recorder = ActionRecorder()
+ self._manager = ConanManager(self._client_cache, self._user_io, self._runner,
+ self._remote_manager, self._search_manager,
+ self._settings_preprocessor, self._recorder, self._registry)
+
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
@@ -319,6 +334,7 @@ class ConanAPIV1(object):
"or it doesn't have a conanfile.py" % tf)
test_conanfile_path = get_test_conanfile_path(test_folder)
+ self._recorder.add_recipe_being_developed(reference)
if test_conanfile_path:
pt = PackageTester(self._manager, self._user_io)
diff --git a/conans/errors.py b/conans/errors.py
index 256f20a3f..e5b73a087 100644
--- a/conans/errors.py
+++ b/conans/errors.py
@@ -68,7 +68,9 @@ class ConanException(Exception):
"""
Generic conans exception
"""
- pass
+ def __init__(self, *args, **kwargs):
+ self.info = None
+ super(ConanException, self).__init__(*args, **kwargs)
class NoRemoteAvailable(ConanException):
| Issues with the JSON created by conan create
Conan version: 1.2.0
Command: `conan create --json ./foo.json . myteam/unstable`
The generated JSON does not separate the dependency packages of the built project, from the built package. This makes it difficult to parse. | conan-io/conan | diff --git a/conans/test/command/json_output_test.py b/conans/test/command/json_output_test.py
index a9f64b87a..0cf3ba030 100644
--- a/conans/test/command/json_output_test.py
+++ b/conans/test/command/json_output_test.py
@@ -23,6 +23,7 @@ class JsonOutputTest(unittest.TestCase):
my_json = json.loads(load(os.path.join(self.client.current_folder, "myfile.json")))
self.assertFalse(my_json["error"])
self.assertEquals(my_json["installed"][0]["recipe"]["id"], "CC/1.0@private_user/channel")
+ self.assertFalse(my_json["installed"][0]["recipe"]["dependency"])
self.assertTrue(my_json["installed"][0]["recipe"]["cache"])
self.assertIsNone(my_json["installed"][0]["recipe"]["remote"])
self.assertTrue(my_json["installed"][0]["packages"][0]["built"])
@@ -37,6 +38,7 @@ class JsonOutputTest(unittest.TestCase):
self.assertIn("T", the_time_str) # Weak validation of the ISO 8601
self.assertFalse(my_json["error"])
self.assertEquals(my_json["installed"][0]["recipe"]["id"], "CC/1.0@private_user/channel")
+ self.assertTrue(my_json["installed"][0]["recipe"]["dependency"])
self.assertFalse(my_json["installed"][0]["recipe"]["cache"])
self.assertTrue(my_json["installed"][0]["recipe"]["downloaded"])
self.assertIsNotNone(my_json["installed"][0]["recipe"]["remote"])
@@ -164,6 +166,10 @@ AA*: CC/1.0@private_user/channel
my_json = load(os.path.join(self.client.current_folder, "myfile.json"))
my_json = json.loads(my_json)
+ self.assertTrue(my_json["installed"][0]["recipe"]["dependency"])
+ self.assertTrue(my_json["installed"][1]["recipe"]["dependency"])
+ self.assertTrue(my_json["installed"][2]["recipe"]["dependency"])
+
# Installed the build require CC with two options
self.assertEquals(len(my_json["installed"][2]["packages"]), 2)
self.assertEquals(my_json["installed"][2]["recipe"]["id"], "CC/1.0@private_user/channel")
diff --git a/conans/test/model/version_ranges_test.py b/conans/test/model/version_ranges_test.py
index 71012b623..919b29a8c 100644
--- a/conans/test/model/version_ranges_test.py
+++ b/conans/test/model/version_ranges_test.py
@@ -1,4 +1,5 @@
import unittest
+
from conans.test.utils.tools import TestBufferConanOutput
from conans.paths import CONANFILE
import os
diff --git a/conans/test/util/action_recorder_test.py b/conans/test/util/action_recorder_test.py
index 0a93a1570..ac85817b5 100644
--- a/conans/test/util/action_recorder_test.py
+++ b/conans/test/util/action_recorder_test.py
@@ -19,9 +19,11 @@ class ActionRecorderTest(unittest.TestCase):
def incomplete_process_test(self):
tracer = ActionRecorder()
tracer.recipe_install_error(self.ref1, INSTALL_ERROR_NETWORK, "SSL wtf", "http://drl.com")
- install_info = tracer.get_install_info()
+ tracer.add_recipe_being_developed(self.ref1)
+ install_info = tracer.get_info()
self.assertTrue(install_info["error"])
self.assertEquals(install_info["installed"][0]["packages"], [])
+ self.assertEquals(install_info["installed"][0]["recipe"]["dependency"], False)
def double_actions_test(self):
tracer = ActionRecorder()
@@ -30,7 +32,7 @@ class ActionRecorderTest(unittest.TestCase):
tracer.package_downloaded(self.ref_p1, "http://drl.com")
tracer.package_fetched_from_cache(self.ref_p1)
- install_info = tracer.get_install_info()
+ install_info = tracer.get_info()
self.assertFalse(install_info["error"])
first_installed = install_info["installed"][0]
@@ -55,12 +57,15 @@ class ActionRecorderTest(unittest.TestCase):
tracer.recipe_fetched_from_cache(self.ref3)
tracer.package_built(self.ref_p3)
+ tracer.add_recipe_being_developed(self.ref1)
- install_info = tracer.get_install_info()
+ install_info = tracer.get_info()
self.assertTrue(install_info["error"])
first_installed = install_info["installed"][0]
+
self.assertTrue(first_installed["recipe"]["cache"])
+ self.assertFalse(first_installed["recipe"]["dependency"])
self.assertFalse(first_installed["recipe"]["downloaded"])
self.assertIsNone(first_installed["recipe"]["error"])
self.assertEquals(str(first_installed["recipe"]["id"]), "lib1/1.0@conan/stable")
@@ -73,6 +78,7 @@ class ActionRecorderTest(unittest.TestCase):
second_installed = install_info["installed"][1]
self.assertFalse(second_installed["recipe"]["cache"])
+ self.assertTrue(second_installed["recipe"]["dependency"])
self.assertTrue(second_installed["recipe"]["downloaded"])
self.assertIsNone(second_installed["recipe"]["error"])
self.assertEquals(str(second_installed["recipe"]["id"]), "lib2/1.0@conan/stable")
@@ -85,6 +91,7 @@ class ActionRecorderTest(unittest.TestCase):
self.assertEquals(str(second_installed["packages"][0]["id"]), "2")
third_installed = install_info["installed"][2]
+ self.assertTrue(third_installed["recipe"]["dependency"])
self.assertFalse(third_installed["packages"][0]["cache"])
self.assertFalse(third_installed["packages"][0]["error"])
self.assertTrue(third_installed["packages"][0]["built"])
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 4
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@35c00decd88eb8dfb87871209907ddce4f7ce170#egg=conan
cov-core==1.15.0
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
nose-cov==1.6
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- cov-core==1.15.0
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- nose-cov==1.6
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/util/action_recorder_test.py::ActionRecorderTest::test_install"
] | [
"conans/test/command/json_output_test.py::JsonOutputTest::test_errors",
"conans/test/command/json_output_test.py::JsonOutputTest::test_json_generation",
"conans/test/command/json_output_test.py::JsonOutputTest::test_simple_fields"
] | [
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_local_basic",
"conans/test/model/version_ranges_test.py::VersionRangesTest::test_remote_basic"
] | [] | MIT License | 2,381 | 2,339 | [
"conans/client/action_recorder.py",
"conans/client/command.py",
"conans/client/conan_api.py",
"conans/errors.py"
] |
dwavesystems__dimod-171 | 2859c969a064fb2b7053919c8f1b50977a408511 | 2018-04-09 20:45:34 | 8ebfffa42319aa4850cfc5a1c99a8711eac44722 | diff --git a/dimod/binary_quadratic_model.py b/dimod/binary_quadratic_model.py
index 47a6c83b..80397f47 100644
--- a/dimod/binary_quadratic_model.py
+++ b/dimod/binary_quadratic_model.py
@@ -5,6 +5,7 @@ todo - describe Ising, QUBO and BQM
"""
from __future__ import absolute_import, division
+from collections import Sized, Container, Iterable
from numbers import Number
from six import itervalues, iteritems, iterkeys
@@ -15,7 +16,7 @@ from dimod.utilities import resolve_label_conflict
from dimod.vartypes import Vartype
-class BinaryQuadraticModel(object):
+class BinaryQuadraticModel(Sized, Container, Iterable):
"""Encodes a binary quadratic model.
Binary quadratic model is the superclass that contains the `Ising model`_ and the QUBO_.
@@ -60,12 +61,23 @@ class BinaryQuadraticModel(object):
class assume that they are numeric.
Examples:
- This example creates a model with three spin variables.
+ This example creates a binary quadratic model with three spin variables.
- >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
- ... {(0, 1): .5, (1, 2): 1.5},
- ... 1.4,
- ... dimod.SPIN)
+ >>> bqm = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5},
+ ... {(0, 1): .5, (1, 2): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+
+ Variables can be any hashable object
+
+ >>> bqm = dimod.BinaryQuadraticModel({'a': 0.0, 'b': -1.0, 'c': 0.5},
+ ... {('a', 'b'): -1.0, ('b', 'c'): 1.5},
+ ... 1.4,
+ ... dimod.SPIN)
+ >>> len(bqm)
+ 3
+ >>> 'b' in bqm
+ True
Attributes:
linear (dict[variable, bias]):
@@ -195,7 +207,14 @@ class BinaryQuadraticModel(object):
def __len__(self):
"""The length is number of variables."""
- return len(self.linear)
+ return self.adj.__len__()
+
+ def __contains__(self, v):
+ """The variables"""
+ return self.adj.__contains__(v)
+
+ def __iter__(self):
+ return self.adj.__iter__()
##################################################################################################
# vartype properties
diff --git a/dimod/embedding/transforms.py b/dimod/embedding/transforms.py
index 332e0ec2..2b2d3b52 100644
--- a/dimod/embedding/transforms.py
+++ b/dimod/embedding/transforms.py
@@ -386,8 +386,11 @@ def unembed_response(target_response, embedding, source_bqm, chain_break_method=
chain_break_method (function, optional, default=:func:`.majority_vote`):
The method used to resolve chain breaks.
+ Returns:
+ :obj:`.Response`
+
"""
- if any(v not in source_bqm.linear for v in embedding):
+ if any(v not in embedding for v in source_bqm):
raise ValueError("given bqm does not match the embedding")
energies = []
| BinaryQuadraticModel should have a correct abstract base class
Should be `collections.abc.Sized` as currently implemented.
Also could be `collections.abc.Container` or even `collections.abc.Collection`. | dwavesystems/dimod | diff --git a/tests/test_binary_quadratic_model.py b/tests/test_binary_quadratic_model.py
index b8593fa0..72f9793d 100644
--- a/tests/test_binary_quadratic_model.py
+++ b/tests/test_binary_quadratic_model.py
@@ -191,6 +191,25 @@ class TestBinaryQuadraticModel(unittest.TestCase):
self.assertEqual(len(bqm), len(linear))
+ def test__contains__(self):
+ bqm = dimod.BinaryQuadraticModel({'a': -1}, {}, 0.0, dimod.SPIN)
+
+ self.assertIn('a', bqm)
+ self.assertNotIn('b', bqm)
+
+ bqm.add_interaction('a', 'b', .5)
+
+ self.assertIn('b', bqm)
+
+ def test__iter__(self):
+ bqm = dimod.BinaryQuadraticModel.empty(dimod.BINARY)
+
+ self.assertEqual(set(bqm), set())
+
+ bqm.add_interaction('a', 'b', -1)
+
+ self.assertEqual(set(bqm), {'a', 'b'})
+
def test_add_variable(self):
bqm = dimod.BinaryQuadraticModel({}, {('a', 'b'): -1}, 0.0, dimod.SPIN)
bqm.add_variable('a', .5)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"mock",
"coverage",
"coveralls",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
decorator==4.4.2
-e git+https://github.com/dwavesystems/dimod.git@2859c969a064fb2b7053919c8f1b50977a408511#egg=dimod
docopt==0.6.2
enum34==1.1.6
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==2.6.0
mock==5.2.0
networkx==2.5.1
numpy==1.11.3
packaging==21.3
pandas==0.22.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.11.0
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dimod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- decorator==4.4.2
- docopt==0.6.2
- enum34==1.1.6
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==2.6.0
- mock==5.2.0
- networkx==2.5.1
- numpy==1.11.3
- packaging==21.3
- pandas==0.22.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.11.0
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dimod
| [
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__contains__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__iter__"
] | [] | [
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__eq__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__len__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test__repr__",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interaction_counterpart",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_interactions_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_offset",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variable_counterpart",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_add_variables_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_binary_property_relabel",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_change_vartype",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_constract_variables",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_quadratic",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_construction_vartype",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_fix_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_flip_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_partial_relabel_inplace",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_copy",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_typical_inplace",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_identity",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_relabel_with_overlap",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interaction",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_interactions_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_offset",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variable",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_remove_variables_from",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_scale",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_spin_property_relabel",
"tests/test_binary_quadratic_model.py::TestBinaryQuadraticModel::test_update",
"tests/test_binary_quadratic_model.py::TestConvert::test_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_numpy_matrix",
"tests/test_binary_quadratic_model.py::TestConvert::test_from_qubo",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_functional_to_and_from_json_with_info",
"tests/test_binary_quadratic_model.py::TestConvert::test_info",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_binary_to_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_ising_spin_to_ising",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_file_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_json_string_empty",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_networkx_graph",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_numpy_matrix",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_pandas_dataframe",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_binary_to_qubo",
"tests/test_binary_quadratic_model.py::TestConvert::test_to_qubo_spin_to_qubo"
] | [] | Apache License 2.0 | 2,384 | 861 | [
"dimod/binary_quadratic_model.py",
"dimod/embedding/transforms.py"
] |
|
dwavesystems__dimod-174 | 7b75e47ce4fec541e432f84367ba58393934b941 | 2018-04-10 00:05:58 | 8ebfffa42319aa4850cfc5a1c99a8711eac44722 | diff --git a/dimod/response.py b/dimod/response.py
index d3b2c38d..59d96329 100644
--- a/dimod/response.py
+++ b/dimod/response.py
@@ -87,21 +87,24 @@ class Response(Iterable, Sized):
self._samples_matrix = samples_matrix
num_samples, num_variables = samples_matrix.shape
- if not isinstance(data_vectors, dict):
+ if not isinstance(data_vectors, Mapping):
raise TypeError("expected 'data_vectors' to be a dict")
if 'energy' not in data_vectors:
raise ValueError("energy must be provided")
else:
- data_vectors = data_vectors.copy() # shallow copy
- data_vectors['energy'] = np.asarray(data_vectors['energy'])
- for vector in data_vectors.values():
- # todo - check that is a vector and that has the right length
- if isinstance(vector, (np.ndarray, list)):
- if len(vector) != num_samples:
- raise ValueError(("expected data vector {} (length {}) to be a vector of length {}"
- "").format(vector, len(vector), num_samples))
- else:
- raise TypeError("expected data vector {} to be a list of NumPy array".format(vector))
+ data_vectors = dict(data_vectors) # shallow copy
+
+ for key, vector in iteritems(data_vectors):
+ try:
+ data_vectors[key] = vector = np.asarray(vector)
+ except (ValueError, TypeError):
+ raise TypeError("expected data vector {} to be array-like".format(key))
+
+ shape = vector.shape
+ if not shape or shape[0] != num_samples:
+ raise ValueError(("expected data vector {} (shape {}) to have {} rows"
+ "").format(key, vector.shape, num_samples))
+
self._data_vectors = data_vectors
# vartype is checked by the decorator
@@ -824,10 +827,13 @@ class Response(Iterable, Sized):
# Viewing a Response
###############################################################################################
- def samples(self, sorted_by='energy'):
+ def samples(self, n=None, sorted_by='energy'):
"""Iterate over the samples in the response.
Args:
+ n (int, optional, default=None):
+ The maximum number of samples to provide. If None, all are provided.
+
sorted_by (str/None, optional, default='energy'):
Selects the `data_vector` used to sort the samples. If None, the samples are yielded in
the order given by the samples matrix.
@@ -861,13 +867,21 @@ class Response(Iterable, Sized):
{'a': -1, 'b': 1}
"""
+ num_samples = len(self)
+
+ if n is not None:
+ for sample in itertools.islice(self.samples(n=None, sorted_by=sorted_by), n):
+ yield sample
+ return
+
if sorted_by is None:
- order = np.arange(len(self))
+ order = np.arange(num_samples)
else:
order = np.argsort(self.data_vectors[sorted_by])
samples = self.samples_matrix
label_mapping = self.label_to_idx
+
for idx in order:
yield SampleView(idx, self)
| data_vectors should have either numpy array values, or a list values, but not both.
https://github.com/dwavesystems/dimod/blob/7b75e47ce4fec541e432f84367ba58393934b941/dimod/response.py#L40
Making it be more than one thing requires the parser of the response to inspect the object before using it. If we want to have the benefits of a numpy array for some of the data_vectors, I think it's worth it to make everything a numpy array
If we don't need it to be a numpy array, might as well make them all lists? | dwavesystems/dimod | diff --git a/tests/test_response.py b/tests/test_response.py
index 73c1091e..6c5f827e 100644
--- a/tests/test_response.py
+++ b/tests/test_response.py
@@ -54,7 +54,7 @@ class TestResponse(unittest.TestCase):
npt.assert_equal(samples_matrix, response.samples_matrix)
npt.assert_allclose(energies, response.data_vectors['energy'])
- def test_data_vector_copy(self):
+ def test_data_vectors_copy(self):
samples_matrix = np.matrix([[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 0, 0, 0],
@@ -66,6 +66,80 @@ class TestResponse(unittest.TestCase):
self.assertIsNot(response.data_vectors, data_vectors)
+ def test_data_vectors_are_arrays(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ self.assertEqual(len(response.data_vectors), 3)
+
+ for key in data_vectors:
+ self.assertIn(key, response.data_vectors)
+
+ vector = response.data_vectors[key]
+
+ self.assertIsInstance(vector, np.ndarray)
+
+ self.assertEqual(vector.shape, (4,))
+
+ def test_data_vectors_wrong_length(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ with self.assertRaises(ValueError):
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ def test_data_vectors_not_array_like(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = 'hi there'
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ with self.assertRaises(ValueError):
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ def test_samples_num_limited(self):
+ samples_matrix = np.matrix([[0, 1, 0, 1],
+ [1, 0, 1, 0],
+ [0, 0, 0, 0],
+ [1, 1, 1, 1]])
+ energies = [2, 2, 0, 4]
+ num_occurrences = [1, 1, 2, 1]
+ objects = [object() for __ in range(4)]
+
+ data_vectors = {'energy': energies, 'occurences': num_occurrences, 'objects': objects}
+
+ response = dimod.Response(samples_matrix, data_vectors, dimod.BINARY)
+
+ samples_list = list(response.samples())
+
+ self.assertEqual(len(samples_list), 4)
+
+ shortened_samples_list = list(response.samples(3))
+
+ self.assertEqual(len(shortened_samples_list), 3)
+ self.assertEqual(shortened_samples_list, samples_list[0:3])
+
def test_instantiation_without_energy(self):
samples_matrix = np.matrix([[0, 1, 0, 1],
[1, 0, 1, 0],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.6 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[all]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt",
"tests/requirements.txt",
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
decorator==5.1.1
-e git+https://github.com/dwavesystems/dimod.git@7b75e47ce4fec541e432f84367ba58393934b941#egg=dimod
docopt==0.6.2
docutils==0.18.1
enum34==1.1.6
execnet==1.9.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==3.0.3
jsonschema==2.6.0
MarkupSafe==2.0.1
mock==2.0.0
networkx==2.0
numpy==1.11.3
packaging==21.3
pandas==0.22.0
pbr==6.1.1
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
requests==2.27.1
six==1.11.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: dimod
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- decorator==5.1.1
- docopt==0.6.2
- docutils==0.18.1
- enum34==1.1.6
- execnet==1.9.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==3.0.3
- jsonschema==2.6.0
- markupsafe==2.0.1
- mock==2.0.0
- networkx==2.0
- numpy==1.11.3
- packaging==21.3
- pandas==0.22.0
- pbr==6.1.1
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- requests==2.27.1
- six==1.11.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/dimod
| [
"tests/test_response.py::TestResponse::test_data_vectors_are_arrays",
"tests/test_response.py::TestResponse::test_data_vectors_not_array_like",
"tests/test_response.py::TestResponse::test_samples_num_limited"
] | [] | [
"tests/test_response.py::TestResponse::test__iter__",
"tests/test_response.py::TestResponse::test_change_vartype_copy",
"tests/test_response.py::TestResponse::test_change_vartype_inplace",
"tests/test_response.py::TestResponse::test_data_docstrings",
"tests/test_response.py::TestResponse::test_data_vectors_copy",
"tests/test_response.py::TestResponse::test_data_vectors_wrong_length",
"tests/test_response.py::TestResponse::test_empty",
"tests/test_response.py::TestResponse::test_from_dicts",
"tests/test_response.py::TestResponse::test_from_dicts_unlike_labels",
"tests/test_response.py::TestResponse::test_from_dicts_unsortable_labels",
"tests/test_response.py::TestResponse::test_from_futures",
"tests/test_response.py::TestResponse::test_from_futures_column_subset",
"tests/test_response.py::TestResponse::test_from_futures_extra_keys",
"tests/test_response.py::TestResponse::test_from_futures_typical",
"tests/test_response.py::TestResponse::test_from_matrix",
"tests/test_response.py::TestResponse::test_from_pandas",
"tests/test_response.py::TestResponse::test_infer_vartype",
"tests/test_response.py::TestResponse::test_instantiation",
"tests/test_response.py::TestResponse::test_instantiation_without_energy",
"tests/test_response.py::TestResponse::test_partial_relabel",
"tests/test_response.py::TestResponse::test_partial_relabel_inplace",
"tests/test_response.py::TestResponse::test_relabel_copy",
"tests/test_response.py::TestResponse::test_relabel_docstring",
"tests/test_response.py::TestResponse::test_update",
"tests/test_response.py::TestResponse::test_update_energy"
] | [] | Apache License 2.0 | 2,385 | 741 | [
"dimod/response.py"
] |
|
oasis-open__cti-python-stix2-165 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | 2018-04-13 15:46:09 | 2d689815d743611a8f3ccd48ce5e2d1ec70695e5 | diff --git a/stix2/properties.py b/stix2/properties.py
index ca7f04c..41841b6 100644
--- a/stix2/properties.py
+++ b/stix2/properties.py
@@ -129,6 +129,8 @@ class ListProperty(Property):
# constructor again
result.append(valid)
continue
+ elif type(self.contained) is DictionaryProperty:
+ obj_type = dict
else:
obj_type = self.contained
diff --git a/stix2/utils.py b/stix2/utils.py
index 9febd78..4ef3d23 100644
--- a/stix2/utils.py
+++ b/stix2/utils.py
@@ -166,7 +166,7 @@ def get_dict(data):
def find_property_index(obj, properties, tuple_to_find):
"""Recursively find the property in the object model, return the index
according to the _properties OrderedDict. If it's a list look for
- individual objects.
+ individual objects. Returns and integer indicating its location
"""
from .base import _STIXBase
try:
@@ -183,6 +183,11 @@ def find_property_index(obj, properties, tuple_to_find):
tuple_to_find)
if val is not None:
return val
+ elif isinstance(item, dict):
+ for idx, val in enumerate(sorted(item)):
+ if (tuple_to_find[0] == val and
+ item.get(val) == tuple_to_find[1]):
+ return idx
elif isinstance(pv, dict):
if pv.get(tuple_to_find[0]) is not None:
try:
| Create an Extension with Dict annidate inside List
Hi,
I'm trying to create a CyberObservable Extension for UserAccount which have to contain a DictionaryProperty() inside a ListProperty(). It is possible? Because when I try to create an extension like this one
```
@CustomExtension(UserAccount, 'ssh_keys', {
keys': ListProperty(DictionaryProperty(), required=True)
})
class SSHKeysExtension:
pass
```
and use it with example = SSHKeysExtension(keys=[{'test123':123, 'test345','aaaa'}])
I obtain a lot of strange errors (the library seems to interpreter the dict as parameters for __init__()
| oasis-open/cti-python-stix2 | diff --git a/stix2/test/test_custom.py b/stix2/test/test_custom.py
index a14503f..b45670f 100644
--- a/stix2/test/test_custom.py
+++ b/stix2/test/test_custom.py
@@ -479,6 +479,27 @@ def test_custom_extension_wrong_observable_type():
assert 'Cannot determine extension type' in excinfo.value.reason
[email protected]("data", [
+ """{
+ "keys": [
+ {
+ "test123": 123,
+ "test345": "aaaa"
+ }
+ ]
+}""",
+])
+def test_custom_extension_with_list_and_dict_properties_observable_type(data):
+ @stix2.observables.CustomExtension(stix2.UserAccount, 'some-extension', [
+ ('keys', stix2.properties.ListProperty(stix2.properties.DictionaryProperty, required=True))
+ ])
+ class SomeCustomExtension:
+ pass
+
+ example = SomeCustomExtension(keys=[{'test123': 123, 'test345': 'aaaa'}])
+ assert data == str(example)
+
+
def test_custom_extension_invalid_observable():
# These extensions are being applied to improperly-created Observables.
# The Observable classes should have been created with the CustomObservable decorator.
diff --git a/stix2/test/test_properties.py b/stix2/test/test_properties.py
index 34edc96..16ff06a 100644
--- a/stix2/test/test_properties.py
+++ b/stix2/test/test_properties.py
@@ -1,6 +1,6 @@
import pytest
-from stix2 import EmailMIMEComponent, ExtensionsProperty, TCPExt
+from stix2 import CustomObject, EmailMIMEComponent, ExtensionsProperty, TCPExt
from stix2.exceptions import AtLeastOnePropertyError, DictionaryKeyError
from stix2.properties import (BinaryProperty, BooleanProperty,
DictionaryProperty, EmbeddedObjectProperty,
@@ -266,6 +266,17 @@ def test_dictionary_property_invalid(d):
assert str(excinfo.value) == d[1]
+def test_property_list_of_dictionary():
+ @CustomObject('x-new-obj', [
+ ('property1', ListProperty(DictionaryProperty(), required=True)),
+ ])
+ class NewObj():
+ pass
+
+ test_obj = NewObj(property1=[{'foo': 'bar'}])
+ assert test_obj.property1[0]['foo'] == 'bar'
+
+
@pytest.mark.parametrize("value", [
{"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
[('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[taxii]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
coverage==6.2
decorator==5.1.1
defusedxml==0.7.1
distlib==0.3.9
docutils==0.18.1
entrypoints==0.4
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
Jinja2==3.0.3
jsonschema==3.2.0
jupyter-client==7.1.2
jupyter-core==4.9.2
jupyterlab-pygments==0.1.2
MarkupSafe==2.0.1
mistune==0.8.4
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
nbsphinx==0.3.2
nest-asyncio==1.6.0
nodeenv==1.6.0
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==1.5.6
sphinx-prompt==1.5.0
-e git+https://github.com/oasis-open/cti-python-stix2.git@2d689815d743611a8f3ccd48ce5e2d1ec70695e5#egg=stix2
stix2-patterns==2.0.0
taxii2-client==2.3.0
testpath==0.6.0
toml==0.10.2
tomli==1.2.3
tornado==6.1
tox==3.28.0
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.16.2
wcwidth==0.2.13
webencodings==0.5.1
zipp==3.6.0
| name: cti-python-stix2
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- coverage==6.2
- decorator==5.1.1
- defusedxml==0.7.1
- distlib==0.3.9
- docutils==0.18.1
- entrypoints==0.4
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- jinja2==3.0.3
- jsonschema==3.2.0
- jupyter-client==7.1.2
- jupyter-core==4.9.2
- jupyterlab-pygments==0.1.2
- markupsafe==2.0.1
- mistune==0.8.4
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbsphinx==0.3.2
- nest-asyncio==1.6.0
- nodeenv==1.6.0
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==1.5.6
- sphinx-prompt==1.5.0
- stix2-patterns==2.0.0
- taxii2-client==2.3.0
- testpath==0.6.0
- toml==0.10.2
- tomli==1.2.3
- tornado==6.1
- tox==3.28.0
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.16.2
- wcwidth==0.2.13
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/cti-python-stix2
| [
"stix2/test/test_custom.py::test_custom_extension_with_list_and_dict_properties_observable_type[{\\n",
"stix2/test/test_properties.py::test_property_list_of_dictionary"
] | [] | [
"stix2/test/test_custom.py::test_identity_custom_property",
"stix2/test/test_custom.py::test_identity_custom_property_invalid",
"stix2/test/test_custom.py::test_identity_custom_property_allowed",
"stix2/test/test_custom.py::test_parse_identity_custom_property[{\\n",
"stix2/test/test_custom.py::test_custom_property_in_bundled_object",
"stix2/test/test_custom.py::test_identity_custom_property_revoke",
"stix2/test/test_custom.py::test_identity_custom_property_edit_markings",
"stix2/test/test_custom.py::test_custom_marking_no_init_1",
"stix2/test/test_custom.py::test_custom_marking_no_init_2",
"stix2/test/test_custom.py::test_custom_object_raises_exception",
"stix2/test/test_custom.py::test_custom_object_type",
"stix2/test/test_custom.py::test_custom_object_no_init_1",
"stix2/test/test_custom.py::test_custom_object_no_init_2",
"stix2/test/test_custom.py::test_parse_custom_object_type",
"stix2/test/test_custom.py::test_parse_unregistered_custom_object_type",
"stix2/test/test_custom.py::test_parse_unregistered_custom_object_type_w_allow_custom",
"stix2/test/test_custom.py::test_custom_observable_object_1",
"stix2/test/test_custom.py::test_custom_observable_object_2",
"stix2/test/test_custom.py::test_custom_observable_object_3",
"stix2/test/test_custom.py::test_custom_observable_raises_exception",
"stix2/test/test_custom.py::test_custom_observable_object_no_init_1",
"stix2/test/test_custom.py::test_custom_observable_object_no_init_2",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_ref_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_refs_list_property",
"stix2/test/test_custom.py::test_custom_observable_object_invalid_valid_refs",
"stix2/test/test_custom.py::test_custom_no_properties_raises_exception",
"stix2/test/test_custom.py::test_custom_wrong_properties_arg_raises_exception",
"stix2/test/test_custom.py::test_parse_custom_observable_object",
"stix2/test/test_custom.py::test_parse_unregistered_custom_observable_object",
"stix2/test/test_custom.py::test_parse_invalid_custom_observable_object",
"stix2/test/test_custom.py::test_observable_custom_property",
"stix2/test/test_custom.py::test_observable_custom_property_invalid",
"stix2/test/test_custom.py::test_observable_custom_property_allowed",
"stix2/test/test_custom.py::test_observed_data_with_custom_observable_object",
"stix2/test/test_custom.py::test_custom_extension_raises_exception",
"stix2/test/test_custom.py::test_custom_extension",
"stix2/test/test_custom.py::test_custom_extension_wrong_observable_type",
"stix2/test/test_custom.py::test_custom_extension_invalid_observable",
"stix2/test/test_custom.py::test_custom_extension_no_properties",
"stix2/test/test_custom.py::test_custom_extension_empty_properties",
"stix2/test/test_custom.py::test_custom_extension_dict_properties",
"stix2/test/test_custom.py::test_custom_extension_no_init_1",
"stix2/test/test_custom.py::test_custom_extension_no_init_2",
"stix2/test/test_custom.py::test_parse_observable_with_custom_extension",
"stix2/test/test_custom.py::test_parse_observable_with_unregistered_custom_extension",
"stix2/test/test_custom.py::test_register_custom_object",
"stix2/test/test_custom.py::test_extension_property_location",
"stix2/test/test_properties.py::test_property",
"stix2/test/test_properties.py::test_basic_clean",
"stix2/test/test_properties.py::test_property_default",
"stix2/test/test_properties.py::test_fixed_property",
"stix2/test/test_properties.py::test_list_property",
"stix2/test/test_properties.py::test_string_property",
"stix2/test/test_properties.py::test_type_property",
"stix2/test/test_properties.py::test_id_property",
"stix2/test/test_properties.py::test_integer_property_valid[2]",
"stix2/test/test_properties.py::test_integer_property_valid[-1]",
"stix2/test/test_properties.py::test_integer_property_valid[3.14]",
"stix2/test/test_properties.py::test_integer_property_valid[False]",
"stix2/test/test_properties.py::test_integer_property_invalid[something]",
"stix2/test/test_properties.py::test_integer_property_invalid[value1]",
"stix2/test/test_properties.py::test_float_property_valid[2]",
"stix2/test/test_properties.py::test_float_property_valid[-1]",
"stix2/test/test_properties.py::test_float_property_valid[3.14]",
"stix2/test/test_properties.py::test_float_property_valid[False]",
"stix2/test/test_properties.py::test_float_property_invalid[something]",
"stix2/test/test_properties.py::test_float_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_valid[True0]",
"stix2/test/test_properties.py::test_boolean_property_valid[False0]",
"stix2/test/test_properties.py::test_boolean_property_valid[True1]",
"stix2/test/test_properties.py::test_boolean_property_valid[False1]",
"stix2/test/test_properties.py::test_boolean_property_valid[true]",
"stix2/test/test_properties.py::test_boolean_property_valid[false]",
"stix2/test/test_properties.py::test_boolean_property_valid[TRUE]",
"stix2/test/test_properties.py::test_boolean_property_valid[FALSE]",
"stix2/test/test_properties.py::test_boolean_property_valid[T]",
"stix2/test/test_properties.py::test_boolean_property_valid[F]",
"stix2/test/test_properties.py::test_boolean_property_valid[t]",
"stix2/test/test_properties.py::test_boolean_property_valid[f]",
"stix2/test/test_properties.py::test_boolean_property_valid[1]",
"stix2/test/test_properties.py::test_boolean_property_valid[0]",
"stix2/test/test_properties.py::test_boolean_property_invalid[abc]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value1]",
"stix2/test/test_properties.py::test_boolean_property_invalid[value2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[2]",
"stix2/test/test_properties.py::test_boolean_property_invalid[-1]",
"stix2/test/test_properties.py::test_reference_property",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01T12:34:56Z]",
"stix2/test/test_properties.py::test_timestamp_property_valid[2017-01-01",
"stix2/test/test_properties.py::test_timestamp_property_valid[Jan",
"stix2/test/test_properties.py::test_timestamp_property_invalid",
"stix2/test/test_properties.py::test_binary_property",
"stix2/test/test_properties.py::test_hex_property",
"stix2/test/test_properties.py::test_dictionary_property_valid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_valid[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d0]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d1]",
"stix2/test/test_properties.py::test_dictionary_property_invalid_key[d2]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d0]",
"stix2/test/test_properties.py::test_dictionary_property_invalid[d1]",
"stix2/test/test_properties.py::test_hashes_property_valid[value0]",
"stix2/test/test_properties.py::test_hashes_property_valid[value1]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value0]",
"stix2/test/test_properties.py::test_hashes_property_invalid[value1]",
"stix2/test/test_properties.py::test_embedded_property",
"stix2/test/test_properties.py::test_enum_property_valid[value0]",
"stix2/test/test_properties.py::test_enum_property_valid[value1]",
"stix2/test/test_properties.py::test_enum_property_valid[b]",
"stix2/test/test_properties.py::test_enum_property_invalid",
"stix2/test/test_properties.py::test_extension_property_valid",
"stix2/test/test_properties.py::test_extension_property_invalid[1]",
"stix2/test/test_properties.py::test_extension_property_invalid[data1]",
"stix2/test/test_properties.py::test_extension_property_invalid_type",
"stix2/test/test_properties.py::test_extension_at_least_one_property_constraint"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,403 | 389 | [
"stix2/properties.py",
"stix2/utils.py"
] |
|
PlasmaPy__PlasmaPy-361 | b550058279e51ad7da88282e19283760adb9c9a2 | 2018-04-15 18:27:51 | b550058279e51ad7da88282e19283760adb9c9a2 | diff --git a/plasmapy/physics/parameters.py b/plasmapy/physics/parameters.py
index d2354c91..40745ed2 100644
--- a/plasmapy/physics/parameters.py
+++ b/plasmapy/physics/parameters.py
@@ -334,9 +334,11 @@ def ion_sound_speed(T_e,
@utils.check_relativistic
@utils.check_quantity({
- 'T': {'units': u.K, 'can_be_negative': False}
+ 'T': {'units': u.K, 'can_be_negative': False},
+ 'mass': {'units': u.kg, 'can_be_negative': False, 'can_be_nan': True}
})
-def thermal_speed(T, particle="e-", method="most_probable"):
[email protected]_input
+def thermal_speed(T, particle: atomic.Particle="e-", method="most_probable", mass=np.nan*u.kg):
r"""
Return the most probable speed for a particle within a Maxwellian
distribution.
@@ -356,6 +358,11 @@ def thermal_speed(T, particle="e-", method="most_probable"):
Method to be used for calculating the thermal speed. Options are
`'most_probable'` (default), `'rms'`, and `'mean_magnitude'`.
+ mass : ~astropy.units.Quantity
+ The particle's mass override. Defaults to NaN and if so, doesn't do
+ anything, but if set, overrides mass acquired from `particle`. Useful
+ with relative velocities of particles.
+
Returns
-------
V : ~astropy.units.Quantity
@@ -417,10 +424,7 @@ def thermal_speed(T, particle="e-", method="most_probable"):
T = T.to(u.K, equivalencies=u.temperature_energy())
- try:
- m = atomic.particle_mass(particle)
- except AtomicError:
- raise ValueError("Unable to find {particle} mass in thermal_speed")
+ m = mass if np.isfinite(mass) else atomic.particle_mass(particle)
# different methods, as per https://en.wikipedia.org/wiki/Thermal_velocity
if method == "most_probable":
diff --git a/plasmapy/physics/transport/collisions.py b/plasmapy/physics/transport/collisions.py
index b061e316..82868951 100644
--- a/plasmapy/physics/transport/collisions.py
+++ b/plasmapy/physics/transport/collisions.py
@@ -7,14 +7,13 @@
import warnings
# plasmapy modules
-import plasmapy.atomic as atomic
from plasmapy import utils
from plasmapy.utils.checks import (check_quantity,
_check_relativistic)
from plasmapy.constants import (c, m_e, k_B, e, eps0, pi, hbar)
-from plasmapy.atomic import (particle_mass, integer_charge)
-from plasmapy.physics.parameters import (Debye_length)
+from plasmapy import atomic
+from plasmapy.physics import parameters
from plasmapy.physics.quantum import (Wigner_Seitz_radius,
thermal_deBroglie_wavelength,
chemical_potential)
@@ -246,29 +245,16 @@ def _boilerPlate(T, particles, V):
"list or tuple containing representations of two "
f"charged particles. Got {particles} instead.")
- masses = np.zeros(2) * u.kg
- charges = np.zeros(2) * u.C
-
- for particle, i in zip(particles, range(2)):
-
- try:
- masses[i] = particle_mass(particles[i])
- except Exception:
- raise ValueError("Unable to find mass of particle: "
- f"{particles[i]}.")
- try:
- charges[i] = np.abs(e * integer_charge(particles[i]))
- if charges[i] is None:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
- except Exception:
- raise ValueError("Unable to find charge of particle: "
- f"{particles[i]}.")
+ particles = [atomic.Particle(p) for p in particles]
+ masses = [p.mass for p in particles]
+ charges = [np.abs(p.charge) for p in particles]
+
# obtaining reduced mass of 2 particle collision system
- reduced_mass = masses[0] * masses[1] / (masses[0] + masses[1])
+ reduced_mass = atomic.reduced_mass(*particles)
+
# getting thermal velocity of system if no velocity is given
if np.isnan(V):
- V = np.sqrt(2 * k_B * T / reduced_mass).to(u.m / u.s)
+ V = parameters.thermal_speed(T, mass=reduced_mass)
_check_relativistic(V, 'V')
return T, masses, charges, reduced_mass, V
@@ -485,7 +471,7 @@ def impact_parameter(T,
raise ValueError("Must provide a z_mean for GMS-2, GMS-5, and "
"GMS-6 methods.")
# Debye length
- lambdaDe = Debye_length(T, n_e)
+ lambdaDe = parameters.Debye_length(T, n_e)
# deBroglie wavelength
lambdaBroglie = hbar / (2 * reduced_mass * V)
# distance of closest approach in 90 degree Coulomb collision
| Split up `_boilerPlate()` in `transport.py`
`_boilerPlate()` currently does a few different things like fetching particle data, calculating thermal velocity, tests/checks. Each piece of functionality should be split into its own function.
See #191 | PlasmaPy/PlasmaPy | diff --git a/plasmapy/physics/tests/test_distribution.py b/plasmapy/physics/tests/test_distribution.py
index f4c3f3ba..35e7ffb0 100644
--- a/plasmapy/physics/tests/test_distribution.py
+++ b/plasmapy/physics/tests/test_distribution.py
@@ -104,16 +104,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- Maxwellian_1D(1 * u.m / u.s,
- T=1 * u.K,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
@@ -813,17 +803,6 @@ def test_std(self):
T_distri = (std**2 / k_B * m_e).to(u.K)
assert np.isclose(T_distri.value, self.T_e.value)
- def test_valErr(self):
- """
- Tests whether ValueError is raised when invalid particle name
- string is passed.
- """
- with pytest.raises(ValueError):
- kappa_velocity_1D(1 * u.m / u.s,
- T=1 * u.K,
- kappa=self.kappa,
- particle='XXX')
-
def test_units_no_vTh(self):
"""
Tests distribution function with units, but not passing vTh.
diff --git a/plasmapy/physics/tests/test_parameters.py b/plasmapy/physics/tests/test_parameters.py
index c9a0e5f5..30ebb26a 100644
--- a/plasmapy/physics/tests/test_parameters.py
+++ b/plasmapy/physics/tests/test_parameters.py
@@ -305,7 +305,7 @@ def test_thermal_speed():
with pytest.raises(RelativityError):
thermal_speed(1e14 * u.K, particle='p')
- with pytest.raises(ValueError):
+ with pytest.raises(InvalidParticleError):
thermal_speed(T_i, particle='asdfasd')
with pytest.warns(u.UnitsWarning):
@@ -561,9 +561,6 @@ def test_gyroradius():
with pytest.raises(TypeError):
gyroradius(u.T, particle="p", Vperp=8 * u.m / u.s)
- with pytest.raises(ValueError):
- gyroradius(B, particle='asfdas', T_i=T_i)
-
with pytest.raises(ValueError):
gyroradius(B, particle='p', T_i=-1 * u.K)
diff --git a/plasmapy/physics/transport/tests/test_collisions.py b/plasmapy/physics/transport/tests/test_collisions.py
index bc60f0f4..5a2ceaa6 100644
--- a/plasmapy/physics/transport/tests/test_collisions.py
+++ b/plasmapy/physics/transport/tests/test_collisions.py
@@ -11,7 +11,7 @@
Knudsen_number,
coupling_parameter)
from plasmapy.physics.transport.collisions import Spitzer_resistivity
-from plasmapy.utils import RelativityWarning, RelativityError, PhysicsWarning
+from plasmapy.utils import exceptions
from plasmapy.constants import m_p, m_e, c
@@ -117,7 +117,7 @@ def test_Chen_fusion(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 16 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -140,7 +140,7 @@ def test_Chen_laser(self):
# velocity. Chen uses v**2 = k * T / m whereas we use
# v ** 2 = 2 * k * T / m
lnLambdaChen = 6.8 + np.log(2)
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
lnLambda = Coulomb_logarithm(T, n, ('e', 'p'))
testTrue = np.isclose(lnLambda,
lnLambdaChen,
@@ -155,7 +155,7 @@ def test_GMS1(self):
Test for first version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -176,7 +176,7 @@ def test_GMS1_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -196,7 +196,7 @@ def test_GMS2(self):
Test for second version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -217,7 +217,7 @@ def test_GMS2_negative(self):
Murillo, and Schlanges PRE (2002). This checks for when
a negative (invalid) Coulomb logarithm is returned.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -237,7 +237,7 @@ def test_GMS3(self):
Test for third version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -259,7 +259,7 @@ def test_GMS3_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -279,7 +279,7 @@ def test_GMS4(self):
Test for fourth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -301,7 +301,7 @@ def test_GMS4_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -321,7 +321,7 @@ def test_GMS5(self):
Test for fifth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -343,7 +343,7 @@ def test_GMS5_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -363,7 +363,7 @@ def test_GMS6(self):
Test for sixth version of Coulomb logarithm from Gericke,
Murillo, and Schlanges PRE (2002).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature1,
self.density1,
self.particles,
@@ -385,7 +385,7 @@ def test_GMS6_negative(self):
a positive value is returned whereas the classical Coulomb
logarithm would return a negative value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Coulomb_logarithm(self.temperature2,
self.density2,
self.particles,
@@ -435,12 +435,12 @@ def test_GMS6_zmean_error(self):
def test_relativity_warn(self):
"""Tests whether relativity warning is raised at high velocity."""
- with pytest.warns(RelativityWarning):
+ with pytest.warns(exceptions.RelativityWarning):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=0.9 * c)
def test_relativity_error(self):
"""Tests whether relativity error is raised at light speed."""
- with pytest.raises(RelativityError):
+ with pytest.raises(exceptions.RelativityError):
Coulomb_logarithm(1e5 * u.K, 1 * u.m ** -3, ('e', 'p'), V=1.1 * c)
def test_unit_conversion_error(self):
@@ -464,7 +464,7 @@ def test_invalid_particle_error(self):
Tests whether an error is raised when an invalid particle name
is given.
"""
- with pytest.raises(ValueError):
+ with pytest.raises(exceptions.InvalidParticleError):
Coulomb_logarithm(1 * u.K, 5 * u.m ** -3, ('e', 'g'))
n_e = np.array([1e9, 1e9, 1e24]) * u.cm ** -3
@@ -605,7 +605,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -626,7 +626,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -645,7 +645,7 @@ def test_electrons(self):
"""
Testing collision frequency between electrons.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.electrons,
@@ -664,7 +664,7 @@ def test_protons(self):
"""
Testing collision frequency between protons (ions).
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.protons,
@@ -683,7 +683,7 @@ def test_zmean(self):
"""
Test collisional frequency function when given arbitrary z_mean.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = collision_frequency(self.T,
self.n,
self.particles,
@@ -714,7 +714,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -735,7 +735,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mean_free_path(self.T,
self.n_e,
self.particles,
@@ -834,7 +834,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -855,7 +855,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -872,7 +872,7 @@ def test_fail1(self):
def test_zmean(self):
"""Testing mobility when z_mean is passed."""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = mobility(self.T,
self.n_e,
self.particles,
@@ -904,7 +904,7 @@ def test_known1(self):
"""
Test for known value.
"""
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
@@ -926,7 +926,7 @@ def test_fail1(self):
value comparison by some quantity close to numerical error.
"""
fail1 = self.True1 * (1 + 1e-15)
- with pytest.warns(PhysicsWarning, match="strong coupling effects"):
+ with pytest.warns(exceptions.PhysicsWarning, match="strong coupling effects"):
methodVal = Knudsen_number(self.length,
self.T,
self.n_e,
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/automated-code-tests.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asteval==1.0.6
astropy==6.0.1
astropy-iers-data==0.2025.3.31.0.36.18
certifi==2025.1.31
charset-normalizer==3.4.1
colorama==0.4.6
contourpy==1.3.0
coverage==7.8.0
coveralls==4.0.1
cycler==0.12.1
Cython==3.0.12
dill==0.3.9
docopt==0.6.2
exceptiongroup==1.2.2
flake8==7.2.0
fonttools==4.56.0
idna==3.10
importlib_resources==6.5.2
iniconfig==2.1.0
kiwisolver==1.4.7
lmfit==1.3.3
matplotlib==3.9.4
mccabe==0.7.0
mpmath==1.3.0
numpy==1.26.4
packaging==24.2
pillow==11.1.0
-e git+https://github.com/PlasmaPy/PlasmaPy.git@b550058279e51ad7da88282e19283760adb9c9a2#egg=plasmapy
pluggy==1.5.0
pycodestyle==2.13.0
pyerfa==2.0.1.5
pyflakes==3.3.2
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
roman==5.0
scipy==1.13.1
six==1.17.0
tomli==2.2.1
uncertainties==3.2.2
urllib3==2.3.0
zipp==3.21.0
| name: PlasmaPy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asteval==1.0.6
- astropy==6.0.1
- astropy-iers-data==0.2025.3.31.0.36.18
- certifi==2025.1.31
- charset-normalizer==3.4.1
- colorama==0.4.6
- contourpy==1.3.0
- coverage==7.8.0
- coveralls==4.0.1
- cycler==0.12.1
- cython==3.0.12
- dill==0.3.9
- docopt==0.6.2
- exceptiongroup==1.2.2
- flake8==7.2.0
- fonttools==4.56.0
- idna==3.10
- importlib-resources==6.5.2
- iniconfig==2.1.0
- kiwisolver==1.4.7
- lmfit==1.3.3
- matplotlib==3.9.4
- mccabe==0.7.0
- mpmath==1.3.0
- numpy==1.26.4
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pycodestyle==2.13.0
- pyerfa==2.0.1.5
- pyflakes==3.3.2
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- pyyaml==6.0.2
- requests==2.32.3
- roman==5.0
- scipy==1.13.1
- six==1.17.0
- tomli==2.2.1
- uncertainties==3.2.2
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/PlasmaPy
| [
"plasmapy/physics/tests/test_parameters.py::test_thermal_speed",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_invalid_particle_error"
] | [
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_units_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_units_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_unitless_no_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_unitless_vTh",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_zero_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_value_drift_units",
"plasmapy/physics/tests/test_parameters.py::test_Alfven_speed",
"plasmapy/physics/tests/test_parameters.py::test_ion_sound_speed",
"plasmapy/physics/tests/test_parameters.py::test_gyrofrequency",
"plasmapy/physics/tests/test_parameters.py::test_plasma_frequency",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_energy_density",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS1_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS3",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS4",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS4_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_quantum"
] | [
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_max_noDrift",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_max_drift",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_std",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_1D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_velocity_3D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_Maxwellian_speed_3D::test_value_drift_units",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_invalid_kappa",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_max_noDrift",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_max_drift",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_maxwellian_limit",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_norm",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_1D::test_std",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_invalid_kappa",
"plasmapy/physics/tests/test_distribution.py::Test_kappa_velocity_3D::test_norm",
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_particleless",
"plasmapy/physics/tests/test_parameters.py::Test_mass_density::test_wrong_units",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_kappa",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_invalid_method",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_probable1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_rms1",
"plasmapy/physics/tests/test_parameters.py::Test_kappa_thermal_speed::test_mean1",
"plasmapy/physics/tests/test_parameters.py::test_gyroradius",
"plasmapy/physics/tests/test_parameters.py::test_Debye_length",
"plasmapy/physics/tests/test_parameters.py::test_Debye_number",
"plasmapy/physics/tests/test_parameters.py::test_inertial_length",
"plasmapy/physics/tests/test_parameters.py::test_magnetic_pressure",
"plasmapy/physics/tests/test_parameters.py::test_upper_hybrid_frequency",
"plasmapy/physics/tests/test_parameters.py::test_lower_hybrid_frequency",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_Q_machine",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_lab",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_torus",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_fusion",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_Chen_laser",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS3_negative",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS2_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS5_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_GMS6_zmean_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_relativity_warn",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_relativity_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_unit_conversion_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Coulomb_logarithm::test_single_particle_error",
"plasmapy/physics/transport/tests/test_collisions.py::Test_b_perp::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_b_perp::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_impact_parameter::test_bad_method",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_electrons",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_protons",
"plasmapy/physics/transport/tests/test_collisions.py::Test_collision_frequency::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mean_free_path::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mean_free_path::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Spitzer_resistivity::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_mobility::test_zmean",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Knudsen_number::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_Knudsen_number::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_known1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_fail1",
"plasmapy/physics/transport/tests/test_collisions.py::Test_coupling_parameter::test_zmean"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,409 | 1,273 | [
"plasmapy/physics/parameters.py",
"plasmapy/physics/transport/collisions.py"
] |
|
TheFriendlyCoder__friendlypins-38 | b52793c458ee2bc4057c22a233d43cc2b1439f8c | 2018-04-15 23:57:05 | b52793c458ee2bc4057c22a233d43cc2b1439f8c | diff --git a/setup.py b/setup.py
index 6604fe6..7c44b31 100755
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,9 @@ PROJECT_NAME = 'friendlypins'
PROJECT_DEPENDENCIES = [
'requests',
'six',
- 'dateutils']
+ 'dateutils',
+ 'tqdm',
+ 'pillow']
PROJECT_DEV_DEPENDENCIES = [
'wheel<1.0.0',
'twine<2.0.0',
diff --git a/src/friendlypins/headers.py b/src/friendlypins/headers.py
index 5df1ea4..b05b9f8 100644
--- a/src/friendlypins/headers.py
+++ b/src/friendlypins/headers.py
@@ -79,5 +79,13 @@ class Headers(object):
# return time data in current locale for convenience
return date_with_tz.astimezone(tz.tzlocal())
+ @property
+ def bytes(self):
+ """Gets the number of bytes contained in the response data
+
+ :rtype: :class:`int`
+ """
+ return int(self._data['Content-Length'])
+
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 84c63b7..864bfbc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -3,7 +3,9 @@ import logging
import os
from six.moves import urllib
import requests
+from tqdm import tqdm
from friendlypins.api import API
+from friendlypins.headers import Headers
def _download_pin(pin, folder):
"""Helper method for downloading a thumbnail from a single pin
@@ -15,6 +17,7 @@ def _download_pin(pin, folder):
:rtype: :class:`int`
"""
log = logging.getLogger(__name__)
+
temp_url = urllib.parse.urlparse(pin.thumbnail.url)
temp_filename = os.path.basename(temp_url.path)
output_file = os.path.join(folder, temp_filename)
@@ -28,6 +31,9 @@ def _download_pin(pin, folder):
try:
response = requests.get(pin.thumbnail.url, stream=True)
response.raise_for_status()
+ headers = Headers(response.headers)
+ log.debug(headers)
+
with open(output_file, "wb") as handle:
for data in response.iter_content():
handle.write(data)
@@ -64,16 +70,18 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
return 1
all_pins = selected_board.all_pins
- log.info('Downloading %s thumbnails...', len(all_pins))
+ log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
- for cur_pin in all_pins:
- retval = _download_pin(cur_pin, output_folder)
- if retval:
- return retval
- if delete:
- cur_pin.delete()
+ with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
+ for cur_pin in all_pins:
+ retval = _download_pin(cur_pin, output_folder)
+ if retval:
+ return retval
+ if delete:
+ cur_pin.delete()
+ pbar.update()
return 0
| Add progress bar support to fpins console app
To make it easier to track the overall progress of a lengthy download operation, we should add support for showing a progress bar to the fpins console app. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index d92a3e9..b91cd0d 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -28,7 +28,10 @@ def test_download_thumbnails(api_requests, user_requests, board_requests, action
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -110,7 +113,10 @@ def test_download_thumbnails_with_delete(pin_requests, api_requests, user_reques
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -194,7 +200,10 @@ def test_download_thumbnails_error(api_requests, user_requests, board_requests,
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -278,7 +287,10 @@ def test_download_thumbnails_missing_board(api_requests, user_requests, board_re
"data": [{
"id": "6789",
"name": "MyBoard",
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
@@ -358,7 +370,10 @@ def test_download_thumbnails_exists(api_requests, user_requests, board_requests,
"data": [{
"id": "6789",
"name": expected_board_name,
- "url": "https://www.pinterest.ca/MyName/MyBoard/"
+ "url": "https://www.pinterest.ca/MyName/MyBoard/",
+ "counts": {
+ "pins": 1
+ }
}]
}
diff --git a/unit_tests/test_headers.py b/unit_tests/test_headers.py
index e7bf38f..0a5e77e 100644
--- a/unit_tests/test_headers.py
+++ b/unit_tests/test_headers.py
@@ -3,8 +3,9 @@ import mock
from friendlypins.headers import Headers
from dateutil import tz
-sample_rate_limit = "200"
-sample_rate_max = "150"
+sample_rate_limit = 200
+sample_rate_max = 150
+sample_content_length = 1024
sample_header = {
'Access-Control-Allow-Origin': '*',
'Age': '0',
@@ -14,12 +15,13 @@ sample_header = {
'Pinterest-Version': 'e3f92ef',
'X-Content-Type-Options': 'nosniff',
'X-Pinterest-RID': '12345678',
- 'X-Ratelimit-Limit': sample_rate_limit,
- 'X-Ratelimit-Remaining': sample_rate_max,
+ 'X-Ratelimit-Limit': str(sample_rate_limit),
+ 'X-Ratelimit-Remaining': str(sample_rate_max),
'Transfer-Encoding': 'chunked',
'Date': 'Sat, 31 Mar 2018 10:58:09 GMT',
'Connection': 'keep-alive',
- 'Pinterest-Generated-By': ''
+ 'Pinterest-Generated-By': '',
+ 'Content-Length': str(sample_content_length)
}
@@ -31,18 +33,22 @@ def test_get_date_locale():
def test_get_rate_limit():
obj = Headers(sample_header)
- assert obj.rate_limit == 200
+ assert obj.rate_limit == sample_rate_limit
def test_get_rate_max():
obj = Headers(sample_header)
- assert obj.rate_remaining == 150
+ assert obj.rate_remaining == sample_rate_max
def test_get_rate_percent():
obj = Headers(sample_header)
assert obj.percent_rate_remaining == 75
+def test_get_num_bytes():
+ obj = Headers(sample_header)
+
+ assert obj.bytes == sample_content_length
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
\ No newline at end of file
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pylint",
"pytest",
"pytest-cov",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.11.7
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
dill==0.3.4
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@b52793c458ee2bc4057c22a233d43cc2b1439f8c#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.7.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==2.13.9
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.5.5
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.16.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.11.7
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- dill==0.3.4
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.7.0
- mock==5.2.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==2.13.9
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.5.5
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.16.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_headers.py::test_get_num_bytes"
] | [] | [
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists",
"unit_tests/test_headers.py::test_get_date_locale",
"unit_tests/test_headers.py::test_get_rate_limit",
"unit_tests/test_headers.py::test_get_rate_max",
"unit_tests/test_headers.py::test_get_rate_percent"
] | [] | Apache License 2.0 | 2,411 | 788 | [
"setup.py",
"src/friendlypins/headers.py",
"src/friendlypins/utils/console_actions.py"
] |
|
TheFriendlyCoder__friendlypins-45 | 9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6 | 2018-04-16 03:40:54 | 9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6 | diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index 045655f..7f51a3b 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -64,16 +64,12 @@ class Board(object):
return int(self._data['counts']['pins'])
@property
- def all_pins(self):
- """Gets a list of all pins from this board
+ def pins(self):
+ """Generator for iterating over the pins linked to this board
- NOTE: This process may take a long time to complete and require
- a lot of memory for boards that contain large numbers of pins
-
- :rtype: :class:`list` of :class:`friendlypins.pin.Pin`
+ :rtype: Generator of :class:`friendlypins.pin.Pin`
"""
- self._log.debug('Gettings all pins for board %s...', self.name)
- retval = list()
+ self._log.debug('Loading pins for board %s...', self.name)
properties = {
"fields": ','.join([
@@ -93,19 +89,20 @@ class Board(object):
])
}
+ page = 0
while True:
+ self._log.debug("Loading pins page %s", page)
result = self._io.get(
"boards/{0}/pins".format(self.unique_id),
properties)
assert 'data' in result
for cur_item in result['data']:
- retval.append(Pin(cur_item, self._io))
+ yield Pin(cur_item, self._io)
if not result["page"]["cursor"]:
break
properties["cursor"] = result["page"]["cursor"]
-
- return retval
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index de3306d..3d69138 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -94,21 +94,39 @@ class User(object):
@property
def boards(self):
- """Gets a list of boards owned by this user
+ """Generator for iterating over the boards owned by this user
- :rtype: :class:`list` of :class:`friendlypins.board.Board`
+ :rtype: Generator of :class:`friendlypins.board.Board`
"""
- self._log.debug("Loading boards for user %s...", self.name)
-
- fields = "id,name,url,description,creator,created_at,counts,image"
- result = self._io.get('me/boards', {"fields": fields})
-
- assert 'data' in result
-
- retval = []
- for cur_item in result['data']:
- retval.append(Board(cur_item, self._io))
- return retval
+ self._log.debug('Loading boards for user %s...', self.name)
+
+ properties = {
+ "fields": ','.join([
+ "id",
+ "name",
+ "url",
+ "description",
+ "creator",
+ "created_at",
+ "counts",
+ "image"
+ ])
+ }
+
+ page = 0
+ while True:
+ self._log.debug("Loading boards page %s", page)
+ result = self._io.get("me/boards", properties)
+ assert 'data' in result
+
+ for cur_item in result['data']:
+ yield Board(cur_item, self._io)
+
+ if not result["page"]["cursor"]:
+ break
+
+ properties["cursor"] = result["page"]["cursor"]
+ page += 1
if __name__ == "__main__":
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 1ca66ef..da40dfc 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -71,13 +71,12 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
log.error("Could not find selected board: %s", board_name)
return 1
- all_pins = selected_board.all_pins
log.info('Downloading thumbnails...')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
with tqdm(total=selected_board.num_pins, unit='b', ncols=80) as pbar:
- for cur_pin in all_pins:
+ for cur_pin in selected_board.pins:
retval = _download_pin(cur_pin, output_folder)
if retval:
return retval
| lazy load boards and pins
To simplify and optimize the interactions with boards and pins we should lazy-load the data from these two API calls using iterators:
User.boards
Board.all_pins | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_board.py b/unit_tests/test_board.py
index 5daac85..2314b19 100644
--- a/unit_tests/test_board.py
+++ b/unit_tests/test_board.py
@@ -24,7 +24,7 @@ def test_board_properties():
assert obj.num_pins == expected_pin_count
-def test_get_all_pins():
+def test_get_pins():
data = {
'id': '987654321',
'name': 'MyBoard'
@@ -54,7 +54,9 @@ def test_get_all_pins():
mock_io.get.return_value = expected_data
obj = Board(data, mock_io)
- result = obj.all_pins
+ result = list()
+ for item in obj.pins:
+ result.append(item)
assert len(result) == 1
assert expected_url == result[0].url
diff --git a/unit_tests/test_console_actions.py b/unit_tests/test_console_actions.py
index 170f224..92138cc 100644
--- a/unit_tests/test_console_actions.py
+++ b/unit_tests/test_console_actions.py
@@ -267,7 +267,10 @@ def test_download_thumbnails_missing_board(rest_io, action_requests, mock_open,
"counts": {
"pins": 1
}
- }]
+ }],
+ "page": {
+ "cursor": None
+ }
}
# Fake pin data for the fake board, with fake thumbnail metadata
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 9184835..0512b58 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -44,13 +44,19 @@ def test_get_boards():
"id": str(expected_id),
"name": expected_name,
"url": expected_url
- }]
+ }],
+ "page": {
+ "cursor": None
+ }
}
mock_io = mock.MagicMock()
mock_io.get.return_value = expected_data
obj = User(data, mock_io)
- result = obj.boards
+
+ result = list()
+ for item in obj.boards:
+ result.append(item)
assert len(result) == 1
assert expected_url == result[0].url
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"mock"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@9c0aa4ebcde5ad444e342aaa2b3315339dbc36f6#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pillow==8.4.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_board.py::test_get_pins"
] | [] | [
"unit_tests/test_board.py::test_board_properties",
"unit_tests/test_console_actions.py::test_download_thumbnails",
"unit_tests/test_console_actions.py::test_download_thumbnails_with_delete",
"unit_tests/test_console_actions.py::test_download_thumbnails_error",
"unit_tests/test_console_actions.py::test_download_thumbnails_missing_board",
"unit_tests/test_console_actions.py::test_download_thumbnails_exists",
"unit_tests/test_user.py::test_user_properties",
"unit_tests/test_user.py::test_get_boards"
] | [] | Apache License 2.0 | 2,413 | 1,073 | [
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/utils/console_actions.py"
] |
|
TheFriendlyCoder__friendlypins-56 | 671c3a7d0546b2996f4b5c248621cc2899bad727 | 2018-04-17 01:48:23 | 671c3a7d0546b2996f4b5c248621cc2899bad727 | diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index cff46b6..63b0360 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -17,20 +17,13 @@ class API(object): # pylint: disable=too-few-public-methods
self._log = logging.getLogger(__name__)
self._io = RestIO(personal_access_token)
- def get_user(self, username=None):
- """Gets all primitives associated with a particular Pinterest user
-
- :param str username:
- Optional name of a user to look up
- If not provided, the currently authentcated user will be returned
-
- :returns: Pinterest user with the given name
+ @property
+ def user(self):
+ """Gets all primitives associated with the authenticated user
+ :returns: currently authenticated pinterest user
:rtype: :class:`friendlypins.user.User`
"""
self._log.debug("Getting authenticated user details...")
- if username:
- raise NotImplementedError(
- "Querying arbitrary Pinerest users is not yet supported.")
fields = "id,username,first_name,last_name,bio,created_at,counts,image"
result = self._io.get("me", {"fields": fields})
diff --git a/src/friendlypins/utils/console_actions.py b/src/friendlypins/utils/console_actions.py
index 8789490..761d5b9 100644
--- a/src/friendlypins/utils/console_actions.py
+++ b/src/friendlypins/utils/console_actions.py
@@ -60,7 +60,7 @@ def download_thumbnails(api_token, board_name, output_folder, delete):
"""
log = logging.getLogger(__name__)
obj = API(api_token)
- user = obj.get_user()
+ user = obj.user
selected_board = None
for cur_board in user.boards:
@@ -97,7 +97,7 @@ def delete_board(api_token, board_name):
"""
log = logging.getLogger(__name__)
obj = API(api_token)
- user = obj.get_user()
+ user = obj.user
selected_board = None
for cur_board in user.boards:
| rename get_user to current_user
Seeing as how we currently only support retrieving data for the currently authenticated user, we should rename get_user to current_user and make it a property with no method parameters. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_api.py b/unit_tests/test_api.py
index b013c9e..8332b3f 100644
--- a/unit_tests/test_api.py
+++ b/unit_tests/test_api.py
@@ -21,7 +21,7 @@ def test_get_user():
mock_io.return_value = mock_obj
obj = API('abcd1234')
- result = obj.get_user()
+ result = obj.user
assert expected_url == result.url
assert expected_firstname == result.first_name
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs==22.2.0
Babel==2.11.0
bleach==4.1.0
cachetools==4.2.4
certifi==2021.5.30
chardet==5.0.0
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@671c3a7d0546b2996f4b5c248621cc2899bad727#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
isort==5.10.1
Jinja2==3.0.3
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
packaging==21.3
Pillow==8.4.0
pkginfo==1.10.0
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
Pygments==2.14.0
pylint==3.0.0a4
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml==0.10.2
tomli==1.2.3
tox==4.0.0a9
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp==3.6.0
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- attrs==22.2.0
- babel==2.11.0
- bleach==4.1.0
- cachetools==4.2.4
- chardet==5.0.0
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- isort==5.10.1
- jinja2==3.0.3
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- packaging==21.3
- pillow==8.4.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pygments==2.14.0
- pylint==3.0.0a4
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- toml==0.10.2
- tomli==1.2.3
- tox==4.0.0a9
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
- zipp==3.6.0
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_api.py::test_get_user"
] | [] | [] | [] | Apache License 2.0 | 2,418 | 516 | [
"src/friendlypins/api.py",
"src/friendlypins/utils/console_actions.py"
] |
|
google__mobly-437 | d8e4c34b46d4bd0f2aa328823e543162933d76c0 | 2018-04-17 08:43:24 | 95286a01a566e056d44acfa9577a45bc7f37f51d | dthkao:
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved.
---
*[mobly/controllers/android_device_lib/adb.py, line 279 at r1](https://beta.reviewable.io/reviews/google/mobly/437#-LAIsFf7-05sU6C6Hx5K:-LAIsFf7-05sU6C6Hx5L:b1zt8o6) ([raw file](https://github.com/google/mobly/blob/93853c8ba9cc06560e500141a68f3fa9039824ab/mobly/controllers/android_device_lib/adb.py#L279)):*
> ```Python
>
> def __getattr__(self, name):
> def adb_call(args=None, shell=False, timeout=None, return_all=False):
> ```
I'm not a huge fan of signatures changing based on a flag. Is there a way we can make this an init-level setting instead of per-call?
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/437)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device_lib/adb.py, line 279 at r1](https://beta.reviewable.io/reviews/google/mobly/437#-LAIsFf7-05sU6C6Hx5K:-LAIv4JN3Pnm3xAhzfbd:bwk8scm) ([raw file](https://github.com/google/mobly/blob/93853c8ba9cc06560e500141a68f3fa9039824ab/mobly/controllers/android_device_lib/adb.py#L279)):*
<details><summary><i>Previously, dthkao (David T.H. Kao) wrote…</i></summary><blockquote>
I'm not a huge fan of signatures changing based on a flag. Is there a way we can make this an init-level setting instead of per-call?
</blockquote></details>
I thought of the same thing initially.
However that wouldn't work since that'll break any util that makes adb calls and expect a single output, which is quite a lot.
---
*Comments from [Reviewable](https://beta.reviewable.io/reviews/google/mobly/437)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 432f08e..12c14bd 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -138,7 +138,7 @@ class AdbProxy(object):
def __init__(self, serial=''):
self.serial = serial
- def _exec_cmd(self, args, shell, timeout):
+ def _exec_cmd(self, args, shell, timeout, stderr):
"""Executes adb commands.
Args:
@@ -148,6 +148,8 @@ class AdbProxy(object):
False to invoke it directly. See subprocess.Popen() docs.
timeout: float, the number of seconds to wait before timing out.
If not specified, no timeout takes effect.
+ stderr: a Byte stream, like io.BytesIO, stderr of the command will
+ be written to this object if provided.
Returns:
The output of the adb command run if exit code is 0.
@@ -169,6 +171,8 @@ class AdbProxy(object):
raise AdbTimeoutError(cmd=args, timeout=timeout)
(out, err) = proc.communicate()
+ if stderr:
+ stderr.write(err)
ret = proc.returncode
logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
cli_cmd_to_string(args), out, err, ret)
@@ -177,7 +181,7 @@ class AdbProxy(object):
else:
raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
- def _exec_adb_cmd(self, name, args, shell, timeout):
+ def _exec_adb_cmd(self, name, args, shell, timeout, stderr):
if shell:
# Add quotes around "adb" in case the ADB path contains spaces. This
# is pretty common on Windows (e.g. Program Files).
@@ -195,7 +199,9 @@ class AdbProxy(object):
adb_cmd.append(args)
else:
adb_cmd.extend(args)
- return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)
+ out = self._exec_cmd(
+ adb_cmd, shell=shell, timeout=timeout, stderr=stderr)
+ return out
def getprop(self, prop_name):
"""Get a property of the device.
@@ -273,7 +279,7 @@ class AdbProxy(object):
return self.shell(instrumentation_command)
def __getattr__(self, name):
- def adb_call(args=None, shell=False, timeout=None):
+ def adb_call(args=None, shell=False, timeout=None, stderr=None):
"""Wrapper for an ADB command.
Args:
@@ -283,6 +289,8 @@ class AdbProxy(object):
False to invoke it directly. See subprocess.Proc() docs.
timeout: float, the number of seconds to wait before timing out.
If not specified, no timeout takes effect.
+ stderr: a Byte stream, like io.BytesIO, stderr of the command
+ will be written to this object if provided.
Returns:
The output of the adb command run if exit code is 0.
@@ -290,6 +298,6 @@ class AdbProxy(object):
args = args or ''
clean_name = name.replace('_', '-')
return self._exec_adb_cmd(
- clean_name, args, shell=shell, timeout=timeout)
+ clean_name, args, shell=shell, timeout=timeout, stderr=stderr)
return adb_call
| Propagate stderr from adb commands
The current mobly adb proxy does not propagate stderr if ret code is zero.
We thought this was ok since Android has fixed return code issues in M.
But turns out many China manufacturers did not fix this in China devices.
In order to better support China devices and potentially other devices of the same ret code problem, we need to surface stderr. | google/mobly | diff --git a/mobly/base_instrumentation_test.py b/mobly/base_instrumentation_test.py
index 4966cd4..bb72075 100644
--- a/mobly/base_instrumentation_test.py
+++ b/mobly/base_instrumentation_test.py
@@ -927,7 +927,7 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
package=package,
options=options,
runner=runner,
- )
+ ).decode('utf-8')
logging.info('Outputting instrumentation test log...')
logging.info(instrumentation_output)
@@ -935,5 +935,5 @@ class BaseInstrumentationTestClass(base_test.BaseTestClass):
instrumentation_block = _InstrumentationBlock(prefix=prefix)
for line in instrumentation_output.splitlines():
instrumentation_block = self._parse_line(instrumentation_block,
- line.decode('utf-8'))
+ line)
return self._finish_parsing(instrumentation_block)
diff --git a/mobly/base_test.py b/mobly/base_test.py
index 8b761fa..e4e047b 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -26,6 +26,7 @@ from mobly import expects
from mobly import records
from mobly import signals
from mobly import runtime_test_info
+from mobly import utils
# Macro strings for test result reporting
TEST_CASE_TOKEN = '[Test]'
@@ -351,7 +352,7 @@ class BaseTestClass(object):
content: dict, the data to add to summary file.
"""
if 'timestamp' not in content:
- content['timestamp'] = time.time()
+ content['timestamp'] = utils.get_current_epoch_time()
self.summary_writer.dump(content,
records.TestSummaryEntryType.USER_DATA)
diff --git a/tests/mobly/base_instrumentation_test_test.py b/tests/mobly/base_instrumentation_test_test.py
index 2256475..3908015 100755
--- a/tests/mobly/base_instrumentation_test_test.py
+++ b/tests/mobly/base_instrumentation_test_test.py
@@ -34,6 +34,17 @@ MOCK_PREFIX = 'my_prefix'
# A mock name for the instrumentation test subclass.
MOCK_INSTRUMENTATION_TEST_CLASS_NAME = 'MockInstrumentationTest'
+MOCK_EMPTY_INSTRUMENTATION_TEST = """\
+INSTRUMENTATION_RESULT: stream=
+
+Time: 0.001
+
+OK (0 tests)
+
+
+INSTRUMENTATION_CODE: -1
+"""
+
class MockInstrumentationTest(BaseInstrumentationTestClass):
def __init__(self, tmp_dir, user_params={}):
@@ -229,18 +240,21 @@ INSTRUMENTATION_STATUS_CODE: -1
instrumentation_output, expected_has_error=True)
def test_run_instrumentation_test_with_no_tests(self):
- instrumentation_output = """\
-INSTRUMENTATION_RESULT: stream=
-
-Time: 0.001
-
-OK (0 tests)
-
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
+ self.assert_run_instrumentation_test(
+ instrumentation_output, expected_completed_and_passed=True)
-INSTRUMENTATION_CODE: -1
-"""
+ @unittest.skipUnless(
+ sys.version_info >= (3, 0),
+ 'Only python3 displays different string types differently.')
+ @mock.patch('logging.info')
+ def test_run_instrumentation_test_logs_correctly(self, mock_info_logger):
+ instrumentation_output = MOCK_EMPTY_INSTRUMENTATION_TEST
self.assert_run_instrumentation_test(
instrumentation_output, expected_completed_and_passed=True)
+ for mock_call in mock_info_logger.mock_calls:
+ logged_format = mock_call[1][0]
+ self.assertIsInstance(logged_format, str)
def test_run_instrumentation_test_with_passing_test(self):
instrumentation_output = """\
diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py
index 9eb3ab8..7bf61ab 100755
--- a/tests/mobly/controllers/android_device_lib/adb_test.py
+++ b/tests/mobly/controllers/android_device_lib/adb_test.py
@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import io
import mock
+import subprocess
from collections import OrderedDict
from future.tests.base import unittest
-
from mobly.controllers.android_device_lib import adb
# Mock parameters for instrumentation.
@@ -42,6 +43,9 @@ MOCK_OPTIONS_INSTRUMENTATION_COMMAND = ('am instrument -r -w -e option1 value1'
# Mock Shell Command
MOCK_SHELL_COMMAND = 'ls'
MOCK_COMMAND_OUTPUT = '/system/bin/ls'.encode('utf-8')
+MOCK_DEFAULT_STDOUT = 'out'
+MOCK_DEFAULT_STDERR = 'err'
+MOCK_DEFAULT_COMMAND_OUTPUT = MOCK_DEFAULT_STDOUT.encode('utf-8')
MOCK_ADB_SHELL_COMMAND_CHECK = 'adb shell command -v ls'
@@ -58,7 +62,8 @@ class AdbTest(unittest.TestCase):
mock_psutil_process.return_value = mock.Mock()
mock_proc.communicate = mock.Mock(
- return_value=('out'.encode('utf-8'), 'err'.encode('utf-8')))
+ return_value=(MOCK_DEFAULT_STDOUT.encode('utf-8'),
+ MOCK_DEFAULT_STDERR.encode('utf-8')))
mock_proc.returncode = 0
return (mock_psutil_process, mock_popen)
@@ -68,9 +73,9 @@ class AdbTest(unittest.TestCase):
mock_Popen):
self._mock_process(mock_psutil_process, mock_Popen)
- reply = adb.AdbProxy()._exec_cmd(
- ['fake_cmd'], shell=False, timeout=None)
- self.assertEqual('out', reply.decode('utf-8'))
+ out = adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=None, stderr=None)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -81,7 +86,8 @@ class AdbTest(unittest.TestCase):
with self.assertRaisesRegex(adb.AdbError,
'Error executing adb cmd .*'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=None)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=None, stderr=None)
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -89,8 +95,9 @@ class AdbTest(unittest.TestCase):
mock_popen):
self._mock_process(mock_psutil_process, mock_popen)
- reply = adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=1)
- self.assertEqual('out', reply.decode('utf-8'))
+ out = adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=1, stderr=None)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -104,7 +111,8 @@ class AdbTest(unittest.TestCase):
with self.assertRaisesRegex(adb.AdbTimeoutError,
'Timed out executing command "fake_cmd" '
'after 0.1s.'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=0.1)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=0.1, stderr=None)
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
@mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
@@ -113,66 +121,100 @@ class AdbTest(unittest.TestCase):
self._mock_process(mock_psutil_process, mock_popen)
with self.assertRaisesRegex(adb.Error,
'Timeout is not a positive value: -1'):
- adb.AdbProxy()._exec_cmd(['fake_cmd'], shell=False, timeout=-1)
+ adb.AdbProxy()._exec_cmd(
+ ['fake_cmd'], shell=False, timeout=-1, stderr=None)
def test_exec_adb_cmd(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().shell(['arg1', 'arg2'])
mock_exec_cmd.assert_called_once_with(
- ['adb', 'shell', 'arg1', 'arg2'], shell=False, timeout=None)
+ ['adb', 'shell', 'arg1', 'arg2'],
+ shell=False,
+ timeout=None,
+ stderr=None)
+
+ def test_exec_adb_cmd_with_serial(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy('12345').shell(['arg1', 'arg2'])
mock_exec_cmd.assert_called_once_with(
['adb', '-s', '12345', 'shell', 'arg1', 'arg2'],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_exec_adb_cmd_with_shell_true(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().shell('arg1 arg2', shell=True)
mock_exec_cmd.assert_called_once_with(
- '"adb" shell arg1 arg2', shell=True, timeout=None)
+ '"adb" shell arg1 arg2', shell=True, timeout=None, stderr=None)
+
+ def test_exec_adb_cmd_with_shell_true_with_serial(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy('12345').shell('arg1 arg2', shell=True)
mock_exec_cmd.assert_called_once_with(
- '"adb" -s "12345" shell arg1 arg2', shell=True, timeout=None)
+ '"adb" -s "12345" shell arg1 arg2',
+ shell=True,
+ timeout=None,
+ stderr=None)
+
+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
+ @mock.patch('mobly.controllers.android_device_lib.adb.psutil.Process')
+ def test_exec_adb_cmd_with_stderr_pipe(self, mock_psutil_process,
+ mock_popen):
+ self._mock_process(mock_psutil_process, mock_popen)
+ stderr_redirect = io.BytesIO()
+ out = adb.AdbProxy().shell(
+ 'arg1 arg2', shell=True, stderr=stderr_redirect)
+ self.assertEqual(MOCK_DEFAULT_STDOUT, out.decode('utf-8'))
+ self.assertEqual(MOCK_DEFAULT_STDERR,
+ stderr_redirect.getvalue().decode('utf-8'))
def test_instrument_without_parameters(self):
"""Verifies the AndroidDevice object's instrument command is correct in
the basic case.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(MOCK_INSTRUMENTATION_PACKAGE)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_BASIC_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_instrument_with_runner(self):
"""Verifies the AndroidDevice object's instrument command is correct
with a runner specified.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(
MOCK_INSTRUMENTATION_PACKAGE,
runner=MOCK_INSTRUMENTATION_RUNNER)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_RUNNER_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_instrument_with_options(self):
"""Verifies the AndroidDevice object's instrument command is correct
with options.
"""
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().instrument(
MOCK_INSTRUMENTATION_PACKAGE,
options=MOCK_INSTRUMENTATION_OPTIONS)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', MOCK_OPTIONS_INSTRUMENTATION_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_cli_cmd_to_string(self):
cmd = ['"adb"', 'a b', 'c//']
@@ -182,11 +224,13 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_called_correctly(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
adb.AdbProxy().has_shell_command(MOCK_SHELL_COMMAND)
mock_exec_cmd.assert_called_once_with(
['adb', 'shell', 'command', '-v', MOCK_SHELL_COMMAND],
shell=False,
- timeout=None)
+ timeout=None,
+ stderr=None)
def test_has_shell_command_with_existing_command(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
@@ -196,6 +240,7 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_with_missing_command_on_older_devices(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
mock_exec_cmd.side_effect = adb.AdbError(
MOCK_ADB_SHELL_COMMAND_CHECK, '', '', 0)
self.assertFalse(
@@ -203,6 +248,7 @@ class AdbTest(unittest.TestCase):
def test_has_shell_command_with_missing_command_on_newer_devices(self):
with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ mock_exec_cmd.return_value = MOCK_DEFAULT_COMMAND_OUTPUT
mock_exec_cmd.side_effect = adb.AdbError(
MOCK_ADB_SHELL_COMMAND_CHECK, '', '', 1)
self.assertFalse(
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
coverage==6.2
execnet==1.9.0
future==1.0.0
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/google/mobly.git@d8e4c34b46d4bd0f2aa328823e543162933d76c0#egg=mobly
mock==1.0.1
packaging==21.3
pluggy==1.0.0
portpicker==1.6.0
psutil==7.0.0
py==1.11.0
pyparsing==3.1.4
pyserial==3.5
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
pytz==2025.2
PyYAML==6.0.1
timeout-decorator==0.5.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- execnet==1.9.0
- future==1.0.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- mock==1.0.1
- packaging==21.3
- pluggy==1.0.0
- portpicker==1.6.0
- psutil==7.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyserial==3.5
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pytz==2025.2
- pyyaml==6.0.1
- timeout-decorator==0.5.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_stderr_pipe",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_called_correctly",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters"
] | [] | [
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test__Instrumentation_block_set_key_on_multiple_equals_sign",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_mixed_user_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_instrumentation_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_no_user_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_parse_instrumentation_options_with_only_instrumentation_params",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_logs_correctly",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_assumption_failure_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashed_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_crashing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_failing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_ignored_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_invalid_syntax",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_runner",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_missing_test_package",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_multiple_tests",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_output",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_no_tests",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_passing_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_prefix_test",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_random_whitespace",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_setup_crash",
"tests/mobly/base_instrumentation_test_test.py::BaseInstrumentationTestTest::test_run_instrumentation_test_with_runner_teardown_crash",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_cli_cmd_to_string",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_existing_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_newer_devices",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_older_devices"
] | [] | Apache License 2.0 | 2,419 | 853 | [
"mobly/controllers/android_device_lib/adb.py"
] |
python-trio__trio-502 | 94d49f95ffba3634197c173b771dca80ebc70b08 | 2018-04-17 09:40:50 | 72dba90e31604c083a177978c40c4dd8570aee21 | diff --git a/trio/_path.py b/trio/_path.py
index 4b1bee16..7f777936 100644
--- a/trio/_path.py
+++ b/trio/_path.py
@@ -128,6 +128,28 @@ class Path(metaclass=AsyncAutoWrapperType):
self._wrapped = pathlib.Path(*args)
+ async def iterdir(self):
+ """
+ Like :meth:`pathlib.Path.iterdir`, but async.
+
+ This is an async method that returns a synchronous iterator, so you
+ use it like::
+
+ for subpath in await mypath.iterdir():
+ ...
+
+ Note that it actually loads the whole directory list into memory
+ immediately, during the initial call. (See `issue #501
+ <https://github.com/python-trio/trio/issues/501>`__ for discussion.)
+
+ """
+
+ def _load_items():
+ return list(self._wrapped.iterdir())
+
+ items = await trio.run_sync_in_worker_thread(_load_items)
+ return (Path(item) for item in items)
+
def __getattr__(self, name):
if name in self._forward:
value = getattr(self._wrapped, name)
| trio.Path.iterdir wrapping is broken
Given `pathlib.Path.iterdir` returns a generator that does IO access on each iteration, `trio.Path.iterdir` is currently broken given it currently only generates the generator asynchronously (which I suppose is pointless given there is no need for IO at generator creation)
The solution would be to modify `trio.Path.iterdir` to return an async generator, however this means creating a special case given the current implementation is only an async wrapper on `pathlib.Path.iterdir`. | python-trio/trio | diff --git a/trio/tests/test_path.py b/trio/tests/test_path.py
index 6b9d1c15..1289cfa2 100644
--- a/trio/tests/test_path.py
+++ b/trio/tests/test_path.py
@@ -198,3 +198,17 @@ async def test_path_nonpath():
async def test_open_file_can_open_path(path):
async with await trio.open_file(path, 'w') as f:
assert f.name == fspath(path)
+
+
+async def test_iterdir(path):
+ # Populate a directory
+ await path.mkdir()
+ await (path / 'foo').mkdir()
+ await (path / 'bar.txt').write_bytes(b'')
+
+ entries = set()
+ for entry in await path.iterdir():
+ assert isinstance(entry, trio.Path)
+ entries.add(entry.name)
+
+ assert entries == {'bar.txt', 'foo'}
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"ipython",
"pyOpenSSL",
"trustme",
"pytest-faulthandler"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asttokens==3.0.0
async-generator==1.10
attrs==25.3.0
cffi==1.17.1
coverage==7.8.0
cryptography==44.0.2
decorator==5.2.1
exceptiongroup==1.2.2
executing==2.2.0
idna==3.10
iniconfig==2.1.0
ipython==8.18.1
jedi==0.19.2
matplotlib-inline==0.1.7
packaging==24.2
parso==0.8.4
pexpect==4.9.0
pluggy==1.5.0
prompt_toolkit==3.0.50
ptyprocess==0.7.0
pure_eval==0.2.3
pycparser==2.22
Pygments==2.19.1
pyOpenSSL==25.0.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-faulthandler==2.0.1
sortedcontainers==2.4.0
stack-data==0.6.3
tomli==2.2.1
traitlets==5.14.3
-e git+https://github.com/python-trio/trio.git@94d49f95ffba3634197c173b771dca80ebc70b08#egg=trio
trustme==1.2.1
typing_extensions==4.13.0
wcwidth==0.2.13
| name: trio
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asttokens==3.0.0
- async-generator==1.10
- attrs==25.3.0
- cffi==1.17.1
- coverage==7.8.0
- cryptography==44.0.2
- decorator==5.2.1
- exceptiongroup==1.2.2
- executing==2.2.0
- idna==3.10
- iniconfig==2.1.0
- ipython==8.18.1
- jedi==0.19.2
- matplotlib-inline==0.1.7
- packaging==24.2
- parso==0.8.4
- pexpect==4.9.0
- pluggy==1.5.0
- prompt-toolkit==3.0.50
- ptyprocess==0.7.0
- pure-eval==0.2.3
- pycparser==2.22
- pygments==2.19.1
- pyopenssl==25.0.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-faulthandler==2.0.1
- sortedcontainers==2.4.0
- stack-data==0.6.3
- tomli==2.2.1
- traitlets==5.14.3
- trio==0.4.0+dev
- trustme==1.2.1
- typing-extensions==4.13.0
- wcwidth==0.2.13
prefix: /opt/conda/envs/trio
| [
"trio/tests/test_path.py::test_iterdir"
] | [] | [
"trio/tests/test_path.py::test_open_is_async_context_manager",
"trio/tests/test_path.py::test_magic",
"trio/tests/test_path.py::test_cmp_magic[Path-Path0]",
"trio/tests/test_path.py::test_cmp_magic[Path-Path1]",
"trio/tests/test_path.py::test_cmp_magic[Path-Path2]",
"trio/tests/test_path.py::test_div_magic[Path-Path0]",
"trio/tests/test_path.py::test_div_magic[Path-Path1]",
"trio/tests/test_path.py::test_div_magic[Path-str]",
"trio/tests/test_path.py::test_div_magic[str-Path]",
"trio/tests/test_path.py::test_forwarded_properties",
"trio/tests/test_path.py::test_async_method_signature",
"trio/tests/test_path.py::test_compare_async_stat_methods[is_dir]",
"trio/tests/test_path.py::test_compare_async_stat_methods[is_file]",
"trio/tests/test_path.py::test_invalid_name_not_wrapped",
"trio/tests/test_path.py::test_async_methods_rewrap[absolute]",
"trio/tests/test_path.py::test_async_methods_rewrap[resolve]",
"trio/tests/test_path.py::test_forward_methods_rewrap",
"trio/tests/test_path.py::test_forward_properties_rewrap",
"trio/tests/test_path.py::test_forward_methods_without_rewrap",
"trio/tests/test_path.py::test_repr",
"trio/tests/test_path.py::test_type_forwards_unsupported",
"trio/tests/test_path.py::test_type_wraps_unsupported",
"trio/tests/test_path.py::test_type_forwards_private",
"trio/tests/test_path.py::test_type_wraps_private",
"trio/tests/test_path.py::test_path_wraps_path[__init__]",
"trio/tests/test_path.py::test_path_wraps_path[joinpath]",
"trio/tests/test_path.py::test_path_nonpath",
"trio/tests/test_path.py::test_open_file_can_open_path"
] | [] | MIT/Apache-2.0 Dual License | 2,420 | 296 | [
"trio/_path.py"
] |
|
TheFriendlyCoder__friendlypins-59 | eed1f246c388b9c1c92755d2c6dd77b5133a686c | 2018-04-18 00:31:38 | eed1f246c388b9c1c92755d2c6dd77b5133a686c | diff --git a/src/friendlypins/api.py b/src/friendlypins/api.py
index f8b7255..4b014c9 100644
--- a/src/friendlypins/api.py
+++ b/src/friendlypins/api.py
@@ -25,7 +25,18 @@ class API(object): # pylint: disable=too-few-public-methods
"""
self._log.debug("Getting authenticated user details...")
- fields = "id,username,first_name,last_name,bio,created_at,counts,image"
+ fields = ",".join([
+ "id",
+ "username",
+ "first_name",
+ "last_name",
+ "bio",
+ "created_at",
+ "counts",
+ "image",
+ "account_type",
+ "url"
+ ])
result = self._io.get("me", {"fields": fields})
assert 'data' in result
diff --git a/src/friendlypins/board.py b/src/friendlypins/board.py
index d8626f6..4118157 100644
--- a/src/friendlypins/board.py
+++ b/src/friendlypins/board.py
@@ -47,6 +47,14 @@ class Board(object):
"""
return self._data['name']
+ @property
+ def description(self):
+ """Gets the descriptive text associated with this board
+
+ :rtype: :class:`str`
+ """
+ return self._data['description']
+
@property
def url(self):
"""Web address for the UI associated with the dashboard
diff --git a/src/friendlypins/user.py b/src/friendlypins/user.py
index 42367b6..2230b65 100644
--- a/src/friendlypins/user.py
+++ b/src/friendlypins/user.py
@@ -109,7 +109,9 @@ class User(object):
"creator",
"created_at",
"counts",
- "image"
+ "image",
+ "reason",
+ "privacy"
])
}
@@ -119,6 +121,35 @@ class User(object):
for cur_item in cur_page['data']:
yield Board(cur_item, self._io)
+ def create_board(self, name, description=None):
+ """Creates a new board for the currently authenticated user
+
+ :param str name: name for the new board
+ :param str description: optional descriptive text for the board
+ :returns: reference to the newly created board
+ :rtype: :class:`friendlypins.board.Board`
+ """
+ properties = {
+ "fields": ','.join([
+ "id",
+ "name",
+ "url",
+ "description",
+ "creator",
+ "created_at",
+ "counts",
+ "image",
+ "reason",
+ "privacy"
+ ])
+ }
+
+ data = {"name": name}
+ if description:
+ data["description"] = description
+
+ result = self._io.post("boards", data, properties)
+ return Board(result['data'], self._io)
if __name__ == "__main__":
pass
diff --git a/src/friendlypins/utils/rest_io.py b/src/friendlypins/utils/rest_io.py
index 20456a5..ed7a77e 100644
--- a/src/friendlypins/utils/rest_io.py
+++ b/src/friendlypins/utils/rest_io.py
@@ -59,12 +59,44 @@ class RestIO(object):
properties["access_token"] = self._token
response = requests.get(temp_url, params=properties)
+
+ self._log.debug("Get response text is %s", response.text)
self._latest_header = Headers(response.headers)
self._log.debug("%s query header: %s", path, self._latest_header)
response.raise_for_status()
return response.json()
+ def post(self, path, data, properties=None):
+ """Posts API data to a given sub-path
+
+ :param str path: sub-path with in the REST API to send data to
+ :param dict data: form data to be posted to the API endpoint
+ :param dict properties:
+ optional set of request properties to append to the API call
+ :returns: json data returned from the API endpoint
+ :rtype: :class:`dict`
+ """
+ self._log.debug(
+ "Posting data from %s with options %s",
+ path,
+ properties
+ )
+ temp_url = "{0}/{1}/".format(self._root_url, path)
+
+ if properties is None:
+ properties = dict()
+ properties["access_token"] = self._token
+
+ response = requests.post(temp_url, data=data, params=properties)
+ self._latest_header = Headers(response.headers)
+ self._log.debug("%s query header: %s", path, self._latest_header)
+ self._log.debug("Post response text is %s", response.text)
+
+ response.raise_for_status()
+
+ return response.json()
+
def get_pages(self, path, properties=None):
"""Generator for iterating over paged results returned from API
| Add code to create new boards
The next logical progression in the API development is to add code for creating new boards for a particular authenticated user. | TheFriendlyCoder/friendlypins | diff --git a/unit_tests/test_rest_io.py b/unit_tests/test_rest_io.py
index b8b4b92..4be8eab 100644
--- a/unit_tests/test_rest_io.py
+++ b/unit_tests/test_rest_io.py
@@ -43,5 +43,30 @@ def test_get_headers(mock_requests):
assert tmp.bytes == expected_bytes
[email protected]("friendlypins.utils.rest_io.requests")
+def test_post(mock_requests):
+ obj = RestIO("1234abcd")
+ expected_path = "me/boards"
+ expected_data = {
+ "name": "My New Board",
+ "description": "Here is my cool description"
+ }
+
+ expected_results = {
+ "testing": "123"
+ }
+ mock_response = mock.MagicMock()
+ mock_requests.post.return_value = mock_response
+ mock_response.json.return_value = expected_results
+
+ res = obj.post(expected_path, expected_data)
+
+ mock_response.json.assert_called_once()
+ mock_requests.post.assert_called_once()
+
+ assert expected_path in mock_requests.post.call_args[0][0]
+ assert "data" in mock_requests.post.call_args[1]
+ assert mock_requests.post.call_args[1]["data"] == expected_data
+
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
diff --git a/unit_tests/test_user.py b/unit_tests/test_user.py
index 6ce221e..75d051e 100644
--- a/unit_tests/test_user.py
+++ b/unit_tests/test_user.py
@@ -63,6 +63,29 @@ def test_get_boards():
assert expected_name == result[0].name
assert expected_id == result[0].unique_id
+def test_create_board():
+ expected_name = "My Board"
+ expected_desc = "My new board is about this stuff..."
+ data = {
+ "id": "1234",
+ "first_name": "Jonh",
+ "last_name": "Doe"
+ }
+ mock_io = mock.MagicMock()
+ mock_io.post.return_value = {
+ "data": {
+ "name": expected_name,
+ "description": expected_desc,
+ "id": "12345"
+ }
+ }
+ obj = User(data, mock_io)
+
+ board = obj.create_board(expected_name, expected_desc)
+ mock_io.post.assert_called_once()
+ assert board is not None
+ assert board.name == expected_name
+ assert board.description == expected_desc
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"pip install wheel twine tox"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
astroid==2.6.6
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
Babel==2.11.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
cryptography==40.0.2
dateutils==0.6.12
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
-e git+https://github.com/TheFriendlyCoder/friendlypins.git@eed1f246c388b9c1c92755d2c6dd77b5133a686c#egg=friendlypins
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isort==5.10.1
jeepney==0.7.1
Jinja2==3.0.3
keyring==23.4.1
lazy-object-proxy==1.7.1
mando==0.7.1
MarkupSafe==2.0.1
mccabe==0.6.1
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
Pillow==8.4.0
pkginfo==1.10.0
platformdirs==2.4.0
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycparser==2.21
Pygments==2.14.0
pylint==3.0.0a4
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
radon==6.0.1
readme-renderer==34.0
requests==2.27.1
requests-toolbelt==1.0.0
rfc3986==1.5.0
SecretStorage==3.3.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
twine==1.15.0
typed-ast==1.4.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
wrapt==1.12.1
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: friendlypins
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- astroid==2.6.6
- babel==2.11.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- cryptography==40.0.2
- dateutils==0.6.12
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- isort==5.10.1
- jeepney==0.7.1
- jinja2==3.0.3
- keyring==23.4.1
- lazy-object-proxy==1.7.1
- mando==0.7.1
- markupsafe==2.0.1
- mccabe==0.6.1
- mock==5.2.0
- pillow==8.4.0
- pkginfo==1.10.0
- platformdirs==2.4.0
- pycparser==2.21
- pygments==2.14.0
- pylint==3.0.0a4
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- radon==6.0.1
- readme-renderer==34.0
- requests==2.27.1
- requests-toolbelt==1.0.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- twine==1.15.0
- typed-ast==1.4.3
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- wrapt==1.12.1
prefix: /opt/conda/envs/friendlypins
| [
"unit_tests/test_rest_io.py::test_post",
"unit_tests/test_user.py::test_create_board"
] | [] | [
"unit_tests/test_rest_io.py::test_get_method",
"unit_tests/test_rest_io.py::test_get_headers",
"unit_tests/test_user.py::test_user_properties",
"unit_tests/test_user.py::test_get_boards"
] | [] | Apache License 2.0 | 2,421 | 1,204 | [
"src/friendlypins/api.py",
"src/friendlypins/board.py",
"src/friendlypins/user.py",
"src/friendlypins/utils/rest_io.py"
] |
|
ttu__ruuvitag-sensor-41 | c0d986391149d31d60d9649cfd9f3946db92a50c | 2018-04-19 15:39:25 | c0d986391149d31d60d9649cfd9f3946db92a50c | diff --git a/ruuvitag_sensor/ruuvi.py b/ruuvitag_sensor/ruuvi.py
index ffd6bc6..0dffc62 100644
--- a/ruuvitag_sensor/ruuvi.py
+++ b/ruuvitag_sensor/ruuvi.py
@@ -202,13 +202,12 @@ class RuuviTagSensor(object):
Returns:
string: Sensor data
"""
+ # Search of FF990403 (Manufacturer Specific Data (FF) / Ruuvi Innovations ltd (9904) / Format 3 (03))
try:
- if len(raw) != 54:
+ if "FF990403" not in raw:
return None
- if raw[16:18] != '03':
- return None
-
- return raw[16:]
+ payload_start = raw.index("FF990403") + 6;
+ return raw[payload_start:]
except:
return None
| Bug: incompatible with RuuviFW 1.2.8
The 1.2.8 update to Ruuvi Firmware trims extra NULLs at the end of transmission which breaks the data format type check. I can fix this and implement #29 . | ttu/ruuvitag-sensor | diff --git a/tests/test_decoder.py b/tests/test_decoder.py
index cd92d1d..639b71a 100644
--- a/tests/test_decoder.py
+++ b/tests/test_decoder.py
@@ -51,6 +51,16 @@ class TestDecoder(TestCase):
self.assertNotEqual(data['acceleration_y'], 0)
self.assertNotEqual(data['acceleration_z'], 0)
+ data = decoder.decode_data('03291A1ECE1EFC18F94202CA0B53BB')
+ self.assertEqual(data['temperature'], 26.3)
+ self.assertEqual(data['pressure'], 1027.66)
+ self.assertEqual(data['humidity'], 20.5)
+ self.assertEqual(data['battery'], 2899)
+ self.assertNotEqual(data['acceleration'], 0)
+ self.assertEqual(data['acceleration_x'], -1000)
+ self.assertNotEqual(data['acceleration_y'], 0)
+ self.assertNotEqual(data['acceleration_z'], 0)
+
def test_df3decode_is_valid_max_values(self):
decoder = Df3Decoder()
humidity = 'C8'
diff --git a/tests/test_ruuvitag_sensor.py b/tests/test_ruuvitag_sensor.py
index ac9e3bb..16fcbc0 100644
--- a/tests/test_ruuvitag_sensor.py
+++ b/tests/test_ruuvitag_sensor.py
@@ -47,7 +47,8 @@ class TestRuuviTagSensor(TestCase):
('CC:2C:6A:1E:59:3D', '1E0201060303AAFE1616AAFE10EE037275752E76692F23416A7759414D4663CD'),
('DD:2C:6A:1E:59:3D', '1E0201060303AAFE1616AAFE10EE037275752E76692F23416A7759414D4663CD'),
('EE:2C:6A:1E:59:3D', '1F0201060303AAFE1716AAFE10F9037275752E76692F23416A5558314D417730C3'),
- ('FF:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B5300000000BB')
+ ('FF:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B5300000000BB'),
+ ('00:2C:6A:1E:59:3D', '1902010415FF990403291A1ECE1E02DEF94202CA0B53BB')
]
for data in datas:
@@ -59,7 +60,7 @@ class TestRuuviTagSensor(TestCase):
get_datas)
def test_find_tags(self):
tags = RuuviTagSensor.find_ruuvitags()
- self.assertEqual(5, len(tags))
+ self.assertEqual(6, len(tags))
@patch('ruuvitag_sensor.ble_communication.BleCommunicationDummy.get_datas',
get_datas)
@@ -87,7 +88,7 @@ class TestRuuviTagSensor(TestCase):
def test_get_datas(self):
datas = []
RuuviTagSensor.get_datas(lambda x: datas.append(x))
- self.assertEqual(5, len(datas))
+ self.assertEqual(6, len(datas))
@patch('ruuvitag_sensor.ble_communication.BleCommunicationDummy.get_datas',
get_datas)
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 1
} | 0.10 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y bluez bluez-hcidump"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
iniconfig==2.1.0
packaging==24.2
pluggy==1.5.0
psutil==7.0.0
ptyprocess==0.7.0
pytest==8.3.5
-e git+https://github.com/ttu/ruuvitag-sensor.git@c0d986391149d31d60d9649cfd9f3946db92a50c#egg=ruuvitag_sensor
Rx==3.2.0
tomli==2.2.1
| name: ruuvitag-sensor
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- psutil==7.0.0
- ptyprocess==0.7.0
- pytest==8.3.5
- rx==3.2.0
- tomli==2.2.1
prefix: /opt/conda/envs/ruuvitag-sensor
| [
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_find_tags",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_datas"
] | [] | [
"tests/test_decoder.py::TestDecoder::test_decode_is_valid",
"tests/test_decoder.py::TestDecoder::test_decode_is_valid_case2",
"tests/test_decoder.py::TestDecoder::test_decode_is_valid_weatherstation_2017_04_12",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid_max_values",
"tests/test_decoder.py::TestDecoder::test_df3decode_is_valid_min_values",
"tests/test_decoder.py::TestDecoder::test_getcorrectdecoder",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_convert_data_not_valid",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_false_mac_raise_error",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_data_for_sensors",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_get_datas_with_macs",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_tag_correct_properties",
"tests/test_ruuvitag_sensor.py::TestRuuviTagSensor::test_tag_update_is_valid"
] | [] | MIT License | 2,425 | 246 | [
"ruuvitag_sensor/ruuvi.py"
] |
|
uptick__pymyob-10 | 7baef26a62b54be57dd4dfbc80cf6962b04acf74 | 2018-04-20 07:19:04 | 7baef26a62b54be57dd4dfbc80cf6962b04acf74 | diff --git a/myob/managers.py b/myob/managers.py
index a9010ec..1e17411 100644
--- a/myob/managers.py
+++ b/myob/managers.py
@@ -29,75 +29,45 @@ class Manager():
def build_method(self, method, endpoint, hint):
full_endpoint = self.base_url + endpoint
- required_args = re.findall('\[([^\]]*)\]', full_endpoint)
- if method in ('PUT', 'POST'):
- required_args.append('data')
+ url_keys = re.findall('\[([^\]]*)\]', full_endpoint)
template = full_endpoint.replace('[', '{').replace(']', '}')
+ required_kwargs = url_keys.copy()
+ if method in ('PUT', 'POST'):
+ required_kwargs.append('data')
+
def inner(*args, **kwargs):
if args:
raise AttributeError("Unnamed args provided. Only keyword args accepted.")
- # Ensure all required args have been provided.
- missing_args = set(required_args) - set(kwargs.keys())
- if missing_args:
- raise KeyError("Missing args %s. Endpoint requires %s." % (
- list(missing_args), required_args
+ # Ensure all required url kwargs have been provided.
+ missing_kwargs = set(required_kwargs) - set(kwargs.keys())
+ if missing_kwargs:
+ raise KeyError("Missing kwargs %s. Endpoint requires %s." % (
+ list(missing_kwargs), required_kwargs
))
+ # Parse kwargs.
+ url_kwargs = {}
+ request_kwargs_raw = {}
+ for k, v in kwargs.items():
+ if k in url_keys:
+ url_kwargs[k] = v
+ elif k != 'data':
+ request_kwargs_raw[k] = v
+
# Determine request method.
request_method = 'GET' if method == 'ALL' else method
# Build url.
- url = template.format(**kwargs)
-
- request_kwargs = {}
-
- # Build headers.
- request_kwargs['headers'] = {
- 'Authorization': 'Bearer %s' % self.credentials.oauth_token,
- 'x-myobapi-cftoken': self.credentials.userpass,
- 'x-myobapi-key': self.credentials.consumer_key,
- 'x-myobapi-version': 'v2',
- }
-
- # Build query.
- request_kwargs['params'] = {}
- filters = []
- for k, v in kwargs.items():
- if k not in required_args + ['orderby', 'format', 'headers', 'page', 'limit', 'templatename']:
- if isinstance(v, str):
- v = [v]
- filters.append(' or '.join("%s eq '%s'" % (k, v_) for v_ in v))
- if filters:
- request_kwargs['params']['$filter'] = '&'.join(filters)
-
- if 'orderby' in kwargs:
- request_kwargs['params']['$orderby'] = kwargs['orderby']
-
- page_size = DEFAULT_PAGE_SIZE
- if 'limit' in kwargs:
- page_size = int(kwargs['limit'])
- request_kwargs['params']['$top'] = page_size
-
- if 'page' in kwargs:
- request_kwargs['params']['$skip'] = (int(kwargs['page']) - 1) * page_size
+ url = template.format(**url_kwargs)
- if 'format' in kwargs:
- request_kwargs['params']['format'] = kwargs['format']
-
- if 'templatename' in kwargs:
- request_kwargs['params']['templatename'] = kwargs['templatename']
-
- if request_method in ('PUT', 'POST'):
- request_kwargs['params']['returnBody'] = 'true'
-
- if 'headers' in kwargs:
- request_kwargs['headers'].update(kwargs['headers'])
-
- # Build body.
- if 'data' in kwargs:
- request_kwargs['json'] = kwargs['data']
+ # Build request kwargs (header/query/body)
+ request_kwargs = self.build_request_kwargs(
+ request_method,
+ data=kwargs.get('data'),
+ **request_kwargs_raw,
+ )
response = requests.request(request_method, url, **request_kwargs)
@@ -129,11 +99,66 @@ class Manager():
elif hasattr(self, method_name):
method_name = '%s_%s' % (method.lower(), method_name)
self.method_details[method_name] = {
- 'args': required_args,
+ 'kwargs': required_kwargs,
'hint': hint,
}
setattr(self, method_name, inner)
+ def build_request_kwargs(self, method, data=None, **kwargs):
+ request_kwargs = {}
+
+ # Build headers.
+ request_kwargs['headers'] = {
+ 'Authorization': 'Bearer %s' % self.credentials.oauth_token,
+ 'x-myobapi-cftoken': self.credentials.userpass,
+ 'x-myobapi-key': self.credentials.consumer_key,
+ 'x-myobapi-version': 'v2',
+ }
+ if 'headers' in kwargs:
+ request_kwargs['headers'].update(kwargs['headers'])
+
+ # Build query.
+ request_kwargs['params'] = {}
+ filters = []
+ for k, v in kwargs.items():
+ if k not in ['orderby', 'format', 'headers', 'page', 'limit', 'templatename']:
+ if isinstance(v, str):
+ v = [v]
+ operator = 'eq'
+ for op in ['lt', 'gt']:
+ if k.endswith('__%s' % op):
+ k = k[:-4]
+ operator = op
+ filters.append(' or '.join("%s %s '%s'" % (k, operator, v_) for v_ in v))
+ if filters:
+ request_kwargs['params']['$filter'] = ' and '.join(filters)
+
+ if 'orderby' in kwargs:
+ request_kwargs['params']['$orderby'] = kwargs['orderby']
+
+ page_size = DEFAULT_PAGE_SIZE
+ if 'limit' in kwargs:
+ page_size = int(kwargs['limit'])
+ request_kwargs['params']['$top'] = page_size
+
+ if 'page' in kwargs:
+ request_kwargs['params']['$skip'] = (int(kwargs['page']) - 1) * page_size
+
+ if 'format' in kwargs:
+ request_kwargs['params']['format'] = kwargs['format']
+
+ if 'templatename' in kwargs:
+ request_kwargs['params']['templatename'] = kwargs['templatename']
+
+ if method in ('PUT', 'POST'):
+ request_kwargs['params']['returnBody'] = 'true'
+
+ # Build body.
+ if data is not None:
+ request_kwargs['json'] = data
+
+ return request_kwargs
+
def __repr__(self):
def print_method(name, args):
return '%s(%s)' % (name, ', '.join(args))
@@ -144,7 +169,7 @@ class Manager():
)
return '%s%s:\n %s' % (self.name, self.__class__.__name__, '\n '.join(
formatstr % (
- print_method(k, v['args']),
+ print_method(k, v['kwargs']),
v['hint'],
) for k, v in sorted(self.method_details.items())
))
| Support for `gt` and `lt` filtering.
Hi there,
I can't find anything about this in the documentation, but does pymyob support query strings?
Thanks
Barton | uptick/pymyob | diff --git a/tests/endpoints.py b/tests/endpoints.py
index 156ae96..4d59358 100644
--- a/tests/endpoints.py
+++ b/tests/endpoints.py
@@ -12,12 +12,12 @@ DATA = {'dummy': 'data'}
class EndpointTests(TestCase):
def setUp(self):
- self.cred = PartnerCredentials(
+ cred = PartnerCredentials(
consumer_key='KeyToTheKingdom',
consumer_secret='TellNoOne',
callback_uri='CallOnlyWhenCalledTo',
)
- self.myob = Myob(self.cred)
+ self.myob = Myob(cred)
self.request_headers = {
'Authorization': 'Bearer None',
'x-myobapi-cftoken': None,
diff --git a/tests/managers.py b/tests/managers.py
index e69de29..71dcb10 100644
--- a/tests/managers.py
+++ b/tests/managers.py
@@ -0,0 +1,65 @@
+from unittest import TestCase
+
+from myob.constants import DEFAULT_PAGE_SIZE
+from myob.credentials import PartnerCredentials
+from myob.managers import Manager
+
+
+class QueryParamTests(TestCase):
+ def setUp(self):
+ cred = PartnerCredentials(
+ consumer_key='KeyToTheKingdom',
+ consumer_secret='TellNoOne',
+ callback_uri='CallOnlyWhenCalledTo',
+ )
+ self.manager = Manager('', credentials=cred)
+
+ def assertParamsEqual(self, raw_kwargs, expected_params, method='GET'):
+ self.assertEqual(
+ self.manager.build_request_kwargs(method, {}, **raw_kwargs)['params'],
+ expected_params
+ )
+
+ def test_filter(self):
+ self.assertParamsEqual({'Type': 'Customer'}, {'$filter': "Type eq 'Customer'"})
+ self.assertParamsEqual({'Type': ['Customer', 'Supplier']}, {'$filter': "Type eq 'Customer' or Type eq 'Supplier'"})
+ self.assertParamsEqual({'DisplayID__gt': '5-0000'}, {'$filter': "DisplayID gt '5-0000'"})
+ self.assertParamsEqual({'DateOccurred__lt': '2013-08-30T19:00:59.043'}, {'$filter': "DateOccurred lt '2013-08-30T19:00:59.043'"})
+ self.assertParamsEqual({'Type': ['Customer', 'Supplier'], 'DisplayID__gt': '5-0000'}, {'$filter': "Type eq 'Customer' or Type eq 'Supplier' and DisplayID gt '5-0000'"})
+
+ def test_orderby(self):
+ self.assertParamsEqual({'orderby': 'Date'}, {'$orderby': "Date"})
+
+ def test_pagination(self):
+ self.assertParamsEqual({'page': 7}, {'$skip': 6 * DEFAULT_PAGE_SIZE})
+ self.assertParamsEqual({'limit': 20}, {'$top': 20})
+ self.assertParamsEqual({'limit': 20, 'page': 7}, {'$top': 20, '$skip': 120})
+
+ def test_format(self):
+ self.assertParamsEqual({'format': 'json'}, {'format': 'json'})
+
+ def test_templatename(self):
+ self.assertParamsEqual({'templatename': 'InvoiceTemplate - 7'}, {'templatename': 'InvoiceTemplate - 7'})
+
+ def test_returnBody(self):
+ self.assertParamsEqual({}, {'returnBody': 'true'}, method='PUT')
+ self.assertParamsEqual({}, {'returnBody': 'true'}, method='POST')
+
+ def test_combination(self):
+ self.assertParamsEqual(
+ {
+ 'Type': ['Customer', 'Supplier'],
+ 'DisplayID__gt': '3-0900',
+ 'orderby': 'Date',
+ 'page': 5,
+ 'limit': 13,
+ 'format': 'json',
+ },
+ {
+ '$filter': "Type eq 'Customer' or Type eq 'Supplier' and DisplayID gt '3-0900'",
+ '$orderby': 'Date',
+ '$skip': 52,
+ '$top': 13,
+ 'format': 'json'
+ },
+ )
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | 0.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
coverage==7.8.0
exceptiongroup==1.2.2
execnet==2.1.1
idna==3.10
iniconfig==2.1.0
oauthlib==3.2.2
packaging==24.2
pluggy==1.5.0
-e git+https://github.com/uptick/pymyob.git@7baef26a62b54be57dd4dfbc80cf6962b04acf74#egg=pymyob
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
requests==2.32.3
requests-oauthlib==2.0.0
tomli==2.2.1
typing_extensions==4.13.0
urllib3==2.3.0
| name: pymyob
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- coverage==7.8.0
- exceptiongroup==1.2.2
- execnet==2.1.1
- idna==3.10
- iniconfig==2.1.0
- oauthlib==3.2.2
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- requests==2.32.3
- requests-oauthlib==2.0.0
- tomli==2.2.1
- typing-extensions==4.13.0
- urllib3==2.3.0
prefix: /opt/conda/envs/pymyob
| [
"tests/managers.py::QueryParamTests::test_combination",
"tests/managers.py::QueryParamTests::test_filter",
"tests/managers.py::QueryParamTests::test_format",
"tests/managers.py::QueryParamTests::test_orderby",
"tests/managers.py::QueryParamTests::test_pagination",
"tests/managers.py::QueryParamTests::test_returnBody",
"tests/managers.py::QueryParamTests::test_templatename"
] | [] | [
"tests/endpoints.py::EndpointTests::test_companyfiles",
"tests/endpoints.py::EndpointTests::test_contacts",
"tests/endpoints.py::EndpointTests::test_general_ledger",
"tests/endpoints.py::EndpointTests::test_inventory",
"tests/endpoints.py::EndpointTests::test_invoices",
"tests/endpoints.py::EndpointTests::test_purchase_orders"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,428 | 1,668 | [
"myob/managers.py"
] |
|
Azure__WALinuxAgent-1127 | d1f9e05b9eaa63997108ebf3de261bf9dca7a25d | 2018-04-20 21:42:00 | 6e9b985c1d7d564253a1c344bab01b45093103cd | diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py
index 91285cf9..024c7f55 100644
--- a/azurelinuxagent/ga/exthandlers.py
+++ b/azurelinuxagent/ga/exthandlers.py
@@ -56,6 +56,7 @@ from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION
# HandlerEnvironment.json schema version
HANDLER_ENVIRONMENT_VERSION = 1.0
+EXTENSION_STATUS_ERROR = 'error'
VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning']
VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"]
@@ -107,14 +108,15 @@ def parse_ext_status(ext_status, data):
validate_has_key(data, 'status', 'status')
status_data = data['status']
validate_has_key(status_data, 'status', 'status/status')
-
- validate_in_range(status_data['status'], VALID_EXTENSION_STATUS,
- 'status/status')
+
+ status = status_data['status']
+ if status not in VALID_EXTENSION_STATUS:
+ status = EXTENSION_STATUS_ERROR
applied_time = status_data.get('configurationAppliedTime')
ext_status.configurationAppliedTime = applied_time
ext_status.operation = status_data.get('operation')
- ext_status.status = status_data.get('status')
+ ext_status.status = status
ext_status.code = status_data.get('code', 0)
formatted_message = status_data.get('formattedMessage')
ext_status.message = parse_formatted_message(formatted_message)
| Extension install failures timeout
The Windows GA reports a status which allows a fast failure, however the Linux GA just reports 'Not ready' which essentially waits for a CRP timeout. We should investigate if there is a substatus we are missing to allow a fast failure. | Azure/WALinuxAgent | diff --git a/tests/ga/test_exthandlers.py b/tests/ga/test_exthandlers.py
new file mode 100644
index 00000000..248750b1
--- /dev/null
+++ b/tests/ga/test_exthandlers.py
@@ -0,0 +1,74 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the Apache License.
+import json
+
+from azurelinuxagent.common.protocol.restapi import ExtensionStatus
+from azurelinuxagent.ga.exthandlers import parse_ext_status
+from tests.tools import *
+
+
+class TestExtHandlers(AgentTestCase):
+ def test_parse_extension_status00(self):
+ """
+ Parse a status report for a successful execution of an extension.
+ """
+
+ s = '''[{
+ "status": {
+ "status": "success",
+ "formattedMessage": {
+ "lang": "en-US",
+ "message": "Command is finished."
+ },
+ "operation": "Daemon",
+ "code": "0",
+ "name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
+ },
+ "version": "1.0",
+ "timestampUTC": "2018-04-20T21:20:24Z"
+ }
+]'''
+ ext_status = ExtensionStatus(seq_no=0)
+ parse_ext_status(ext_status, json.loads(s))
+
+ self.assertEqual('0', ext_status.code)
+ self.assertEqual(None, ext_status.configurationAppliedTime)
+ self.assertEqual('Command is finished.', ext_status.message)
+ self.assertEqual('Daemon', ext_status.operation)
+ self.assertEqual('success', ext_status.status)
+ self.assertEqual(0, ext_status.sequenceNumber)
+ self.assertEqual(0, len(ext_status.substatusList))
+
+ def test_parse_extension_status01(self):
+ """
+ Parse a status report for a failed execution of an extension.
+
+ The extension returned a bad status/status of failed.
+ The agent should handle this gracefully, and convert all unknown
+ status/status values into an error.
+ """
+
+ s = '''[{
+ "status": {
+ "status": "failed",
+ "formattedMessage": {
+ "lang": "en-US",
+ "message": "Enable failed: Failed with error: commandToExecute is empty or invalid ..."
+ },
+ "operation": "Enable",
+ "code": "0",
+ "name": "Microsoft.OSTCExtensions.CustomScriptForLinux"
+ },
+ "version": "1.0",
+ "timestampUTC": "2018-04-20T20:50:22Z"
+}]'''
+ ext_status = ExtensionStatus(seq_no=0)
+ parse_ext_status(ext_status, json.loads(s))
+
+ self.assertEqual('0', ext_status.code)
+ self.assertEqual(None, ext_status.configurationAppliedTime)
+ self.assertEqual('Enable failed: Failed with error: commandToExecute is empty or invalid ...', ext_status.message)
+ self.assertEqual('Enable', ext_status.operation)
+ self.assertEqual('error', ext_status.status)
+ self.assertEqual(0, ext_status.sequenceNumber)
+ self.assertEqual(0, len(ext_status.substatusList))
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pyasn1",
"nose",
"nose-cov",
"pytest"
],
"pre_install": null,
"python": "3.4",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cov-core==1.15.0
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
nose==1.3.7
nose-cov==1.6
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyasn1==0.5.1
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/Azure/WALinuxAgent.git@d1f9e05b9eaa63997108ebf3de261bf9dca7a25d#egg=WALinuxAgent
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cov-core==1.15.0
- coverage==6.2
- nose==1.3.7
- nose-cov==1.6
- pyasn1==0.5.1
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/ga/test_exthandlers.py::TestExtHandlers::test_parse_extension_status01"
] | [] | [
"tests/ga/test_exthandlers.py::TestExtHandlers::test_parse_extension_status00"
] | [] | Apache License 2.0 | 2,432 | 371 | [
"azurelinuxagent/ga/exthandlers.py"
] |
|
jupyter__nbgrader-947 | 919c56a9782647a97bd03a0c9d6d0ac5633db0a3 | 2018-04-22 13:37:42 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/converters/autograde.py b/nbgrader/converters/autograde.py
index 62327f17..57c662ab 100644
--- a/nbgrader/converters/autograde.py
+++ b/nbgrader/converters/autograde.py
@@ -2,7 +2,7 @@ import os
import shutil
from textwrap import dedent
-from traitlets import Bool, List
+from traitlets import Bool, List, Dict
from .base import BaseConverter, NbGraderException
from ..preprocessors import (
@@ -24,6 +24,19 @@ class Autograde(BaseConverter):
)
).tag(config=True)
+ exclude_overwriting = Dict(
+ {},
+ help=dedent(
+ """
+ A dictionary with keys corresponding to assignment names and values
+ being a list of filenames (relative to the assignment's source
+ directory) that should NOT be overwritten with the source version.
+ This is to allow students to e.g. edit a python file and submit it
+ alongside the notebooks in their assignment.
+ """
+ )
+ ).tag(config=True)
+
_sanitizing = True
@property
@@ -109,7 +122,9 @@ class Autograde(BaseConverter):
self.log.info("Overwriting files with master versions from the source directory")
dest_path = self._format_dest(assignment_id, student_id)
source_path = self.coursedir.format_path(self.coursedir.source_directory, '.', assignment_id)
- source_files = utils.find_all_files(source_path, self.coursedir.ignore + ["*.ipynb"])
+ source_files = set(utils.find_all_files(source_path, self.coursedir.ignore + ["*.ipynb"]))
+ exclude_files = set([os.path.join(source_path, x) for x in self.exclude_overwriting.get(assignment_id, [])])
+ source_files = list(source_files - exclude_files)
# copy them to the build directory
for filename in source_files:
| Have submitted notebooks import from local directory
I had students edit a python file and then submit it along with the notebooks. However, when I run the autograder, nbgrader imports the python file from my source directory instead of the submitted one. This, of course, leads to the test cells that test their implementation always passing no matter what they do (and also makes it so that, if they added and rely on any further functionality that's not in my solution, then those blocks fail!). Is there any way to have the submitted notebooks import from the submitted .py file?
| jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_autograde.py b/nbgrader/tests/apps/test_nbgrader_autograde.py
index 091564e7..ba44d44b 100644
--- a/nbgrader/tests/apps/test_nbgrader_autograde.py
+++ b/nbgrader/tests/apps/test_nbgrader_autograde.py
@@ -389,20 +389,24 @@ class TestNbGraderAutograde(BaseTestApp):
"""Are dependent files properly linked and overwritten?"""
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
- fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]\n""")
+ fh.write("""c.Autograde.exclude_overwriting = {"ps1": ["helper.py"]}\n""")
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "source", "ps1", "data.csv"), "some,data\n")
+ self._make_file(join(course_dir, "source", "ps1", "helper.py"), "print('hello!')\n")
run_nbgrader(["assign", "ps1", "--db", db])
self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 PST")
self._make_file(join(course_dir, "submitted", "foo", "ps1", "data.csv"), "some,other,data\n")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "helper.py"), "print('this is different!')\n")
run_nbgrader(["autograde", "ps1", "--db", db])
assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data.csv"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "helper.py"))
with open(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "r") as fh:
contents = fh.read()
@@ -412,6 +416,45 @@ class TestNbGraderAutograde(BaseTestApp):
contents = fh.read()
assert contents == "some,data\n"
+ with open(join(course_dir, "autograded", "foo", "ps1", "helper.py"), "r") as fh:
+ contents = fh.read()
+ assert contents == "print('this is different!')\n"
+
+ def test_grade_overwrite_files_subdirs(self, db, course_dir):
+ """Are dependent files properly linked and overwritten?"""
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]\n""")
+ fh.write("""c.Autograde.exclude_overwriting = {{"ps1": ["{}"]}}\n""".format(os.path.join("subdir", "helper.py")))
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "source", "ps1", "subdir", "data.csv"), "some,data\n")
+ self._make_file(join(course_dir, "source", "ps1", "subdir", "helper.py"), "print('hello!')\n")
+ run_nbgrader(["assign", "ps1", "--db", db])
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "timestamp.txt"), "2015-02-02 15:58:23.948203 PST")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "subdir", "data.csv"), "some,other,data\n")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "subdir", "helper.py"), "print('this is different!')\n")
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "subdir", "data.csv"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "subdir", "helper.py"))
+
+ with open(join(course_dir, "autograded", "foo", "ps1", "timestamp.txt"), "r") as fh:
+ contents = fh.read()
+ assert contents == "2015-02-02 15:58:23.948203 PST"
+
+ with open(join(course_dir, "autograded", "foo", "ps1", "subdir", "data.csv"), "r") as fh:
+ contents = fh.read()
+ assert contents == "some,data\n"
+
+ with open(join(course_dir, "autograded", "foo", "ps1", "subdir", "helper.py"), "r") as fh:
+ contents = fh.read()
+ assert contents == "print('this is different!')\n"
+
def test_side_effects(self, db, course_dir):
with open("nbgrader_config.py", "a") as fh:
fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@919c56a9782647a97bd03a0c9d6d0ac5633db0a3#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_overwrite_files",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_overwrite_files_subdirs"
] | [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer_single_notebook"
] | [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_help",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_student",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_assignment",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_empty_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_none",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_zero",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_plugin",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_filter_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_side_effects",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_skip_extra_notebooks",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_hidden_tests_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_submitted_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_no_execute",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_infinite_loop",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_files",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_missing_notebook"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,435 | 478 | [
"nbgrader/converters/autograde.py"
] |
|
Eyepea__aiosip-111 | 3219ca46cdfd115a72101d92cd1f717f78d9f7b9 | 2018-04-23 21:51:49 | 3219ca46cdfd115a72101d92cd1f717f78d9f7b9 | diff --git a/aiosip/transaction.py b/aiosip/transaction.py
index da61942..4f2b0ab 100644
--- a/aiosip/transaction.py
+++ b/aiosip/transaction.py
@@ -145,6 +145,9 @@ class FutureTransaction(BaseTransaction):
self.dialog.end_transaction(self)
def _result(self, msg):
+ if self.authentification:
+ self.authentification.cancel()
+ self.authentification = None
self._future.set_result(msg)
self.dialog.end_transaction(self)
| Improper handling of 401 authentication failures
In a call flow such as
* send REGISTER
* get 401 response with auth challenge
* send REGISTER with valid authentication
* get 401 response with no challenge (ie: your credentials are fine, but still denied)
The client will continue to retransmit the request with the authorization header | Eyepea/aiosip | diff --git a/tests/test_sip_scenario.py b/tests/test_sip_scenario.py
index d38e7c3..fdb1a89 100644
--- a/tests/test_sip_scenario.py
+++ b/tests/test_sip_scenario.py
@@ -92,6 +92,53 @@ async def test_authentication(test_server, protocol, loop, from_details, to_deta
await app.close()
+async def test_authentication_rejection(test_server, protocol, loop, from_details, to_details):
+ received_messages = list()
+
+ class Dialplan(aiosip.BaseDialplan):
+
+ async def resolve(self, *args, **kwargs):
+ await super().resolve(*args, **kwargs)
+ return self.subscribe
+
+ async def subscribe(self, request, message):
+ dialog = request._create_dialog()
+
+ received_messages.append(message)
+ await dialog.unauthorized(message)
+
+ async for message in dialog:
+ received_messages.append(message)
+ await dialog.reply(message, 401)
+
+ app = aiosip.Application(loop=loop)
+ server_app = aiosip.Application(loop=loop, dialplan=Dialplan())
+ server = await test_server(server_app)
+
+ peer = await app.connect(
+ protocol=protocol,
+ remote_addr=(
+ server.sip_config['server_host'],
+ server.sip_config['server_port'],
+ )
+ )
+
+ result = await peer.register(
+ expires=1800,
+ from_details=aiosip.Contact.from_header(from_details),
+ to_details=aiosip.Contact.from_header(to_details),
+ password='testing_pass',
+ )
+
+ # wait long enough to ensure no improper retransmit
+ await asyncio.sleep(1)
+ assert len(received_messages) == 2
+ assert result.status_code == 401
+
+ await server_app.close()
+ await app.close()
+
+
async def test_invite(test_server, protocol, loop, from_details, to_details):
call_established = loop.create_future()
call_disconnected = loop.create_future()
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "Pipfile",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aiodns==3.2.0
-e git+https://github.com/Eyepea/aiosip.git@3219ca46cdfd115a72101d92cd1f717f78d9f7b9#egg=aiosip
attrs==22.2.0
certifi==2021.5.30
cffi==1.15.1
coverage==6.2
cssselect==1.1.0
importlib-metadata==4.8.3
iniconfig==1.1.1
lxml==5.3.1
multidict==5.2.0
packaging==21.3
pipfile==0.0.2
pluggy==1.0.0
py==1.11.0
pycares==4.3.0
pycparser==2.21
pyparsing==3.1.4
pyquery==1.4.3
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions==4.1.1
websockets==9.1
zipp==3.6.0
| name: aiosip
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- pipfile=0.0.2=py_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- aiodns==3.2.0
- attrs==22.2.0
- cffi==1.15.1
- coverage==6.2
- cssselect==1.1.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- lxml==5.3.1
- multidict==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pycares==4.3.0
- pycparser==2.21
- pyparsing==3.1.4
- pyquery==1.4.3
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- tomli==1.2.3
- typing-extensions==4.1.1
- websockets==9.1
- zipp==3.6.0
prefix: /opt/conda/envs/aiosip
| [
"tests/test_sip_scenario.py::test_authentication_rejection[udp]",
"tests/test_sip_scenario.py::test_authentication_rejection[tcp]",
"tests/test_sip_scenario.py::test_invite[udp]",
"tests/test_sip_scenario.py::test_cancel[udp]"
] | [] | [
"tests/test_sip_scenario.py::test_notify[udp]",
"tests/test_sip_scenario.py::test_notify[tcp]",
"tests/test_sip_scenario.py::test_authentication[udp]",
"tests/test_sip_scenario.py::test_authentication[tcp]",
"tests/test_sip_scenario.py::test_invite[tcp]",
"tests/test_sip_scenario.py::test_cancel[tcp]"
] | [] | Apache License 2.0 | 2,441 | 139 | [
"aiosip/transaction.py"
] |
|
datosgobar__pydatajson-153 | dae546a739eb2aab1c34b3d8bbb5896fe804e0aa | 2018-04-24 17:27:32 | adb85a7de7dfa073ddf9817a5fe2d125f9ce4e54 | diff --git a/pydatajson/federation.py b/pydatajson/federation.py
index 43e932e..b503d95 100644
--- a/pydatajson/federation.py
+++ b/pydatajson/federation.py
@@ -5,11 +5,13 @@ de la API de CKAN.
"""
from __future__ import print_function
+import logging
from ckanapi import RemoteCKAN
-from ckanapi.errors import NotFound
+from ckanapi.errors import NotFound, NotAuthorized
from .ckan_utils import map_dataset_to_package, map_theme_to_group
from .search import get_datasets
+logger = logging.getLogger(__name__)
def push_dataset_to_ckan(catalog, owner_org, dataset_origin_identifier,
portal_url, apikey, catalog_id=None,
@@ -250,14 +252,20 @@ def harvest_catalog_to_ckan(catalog, portal_url, apikey, catalog_id,
Returns:
str: El id del dataset en el catálogo de destino.
"""
- dataset_list = dataset_list or [ds['identifier']
- for ds in catalog.datasets]
+ # Evitar entrar con valor falsy
+ if dataset_list is None:
+ dataset_list = [ds['identifier'] for ds in catalog.datasets]
owner_org = owner_org or catalog_id
harvested = []
for dataset_id in dataset_list:
- harvested_id = harvest_dataset_to_ckan(
- catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
- harvested.append(harvested_id)
+ try:
+ harvested_id = harvest_dataset_to_ckan(
+ catalog, owner_org, dataset_id, portal_url, apikey, catalog_id)
+ harvested.append(harvested_id)
+ except (NotAuthorized, NotFound, KeyError, TypeError) as e:
+ logger.error("Error federando catalogo:"+catalog_id+", dataset:"+dataset_id + "al portal: "+portal_url)
+ logger.error(str(e))
+
return harvested
| Robustecer el manejo de harvest_catalog_to_ckan()
Hay que corregir 2 problemas:
- En caso de pasar una lista vacía en el dataset list, no se debe federar ningún dataset. Actualmente se federan todos.
-En caso de que alguno de las llamadas a `harvest_dataset_to_ckan()` falle, loggear y continuar con el resto. Actualmente la federación entera del catálogo levanta la excepción. | datosgobar/pydatajson | diff --git a/tests/test_federation.py b/tests/test_federation.py
index fe95079..9d0515f 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -223,6 +223,13 @@ class PushDatasetTestCase(unittest.TestCase):
self.assertCountEqual([self.catalog_id+'_'+ds['identifier'] for ds in self.catalog.datasets],
harvested_ids)
+ @patch('pydatajson.federation.RemoteCKAN', autospec=True)
+ def test_harvest_catalog_with_empty_list(self, mock_portal):
+ harvested_ids = harvest_catalog_to_ckan(self.catalog, 'portal', 'key', self.catalog_id,
+ owner_org='owner', dataset_list=[])
+ mock_portal.assert_not_called()
+ self.assertEqual([], harvested_ids)
+
class RemoveDatasetTestCase(unittest.TestCase):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
chardet==3.0.4
ckanapi==4.0
docopt==0.6.2
et-xmlfile==1.1.0
idna==2.6
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.0
jdcal==1.4.1
jsonschema==2.6.0
openpyxl==2.4.11
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/datosgobar/pydatajson.git@dae546a739eb2aab1c34b3d8bbb5896fe804e0aa#egg=pydatajson
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.6.1
requests==2.18.4
rfc3987==1.3.7
six==1.11.0
tomli==1.2.3
typing_extensions==4.1.1
unicodecsv==0.14.1
Unidecode==0.4.21
urllib3==1.22
zipp==3.6.0
| name: pydatajson
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- chardet==3.0.4
- ckanapi==4.0
- docopt==0.6.2
- et-xmlfile==1.1.0
- idna==2.6
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.0
- jdcal==1.4.1
- jsonschema==2.6.0
- openpyxl==2.4.11
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.6.1
- requests==2.18.4
- rfc3987==1.3.7
- six==1.11.0
- tomli==1.2.3
- typing-extensions==4.1.1
- unicodecsv==0.14.1
- unidecode==0.04.21
- urllib3==1.22
- zipp==3.6.0
prefix: /opt/conda/envs/pydatajson
| [
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_empty_list"
] | [] | [
"tests/test_federation.py::PushDatasetTestCase::test_dataset_id_is_preserved_if_catalog_id_is_not_passed",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_level_wrappers",
"tests/test_federation.py::PushDatasetTestCase::test_dataset_without_license_sets_notspecified",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_dataset_list",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_no_optional_parametres",
"tests/test_federation.py::PushDatasetTestCase::test_harvest_catalog_with_owner_org",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_created_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_id_is_updated_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_licenses_are_interpreted_correctly",
"tests/test_federation.py::PushDatasetTestCase::test_tags_are_passed_correctly",
"tests/test_federation.py::RemoveDatasetTestCase::test_empty_search_doesnt_call_purge",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_filter_in_out_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_one_dataset",
"tests/test_federation.py::RemoveDatasetTestCase::test_query_over_500_datasets",
"tests/test_federation.py::RemoveDatasetTestCase::test_remove_through_filters_and_organization",
"tests/test_federation.py::PushThemeTestCase::test_ckan_portal_is_called_with_correct_parametres",
"tests/test_federation.py::PushThemeTestCase::test_empty_theme_search_raises_exception",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_identifier",
"tests/test_federation.py::PushThemeTestCase::test_function_pushes_theme_by_label",
"tests/test_federation.py::PushCatalogThemesTestCase::test_empty_portal_pushes_every_theme",
"tests/test_federation.py::PushCatalogThemesTestCase::test_full_portal_pushes_nothing",
"tests/test_federation.py::PushCatalogThemesTestCase::test_non_empty_intersection_pushes_missing_themes"
] | [] | MIT License | 2,443 | 454 | [
"pydatajson/federation.py"
] |
|
ELIFE-ASU__Neet-105 | 041332432596020896894dbaa66282010db9e065 | 2018-04-25 16:50:40 | 041332432596020896894dbaa66282010db9e065 | diff --git a/neet/boolean/logicnetwork.py b/neet/boolean/logicnetwork.py
index b9342f1..b173fdf 100644
--- a/neet/boolean/logicnetwork.py
+++ b/neet/boolean/logicnetwork.py
@@ -109,13 +109,13 @@ class LogicNetwork(object):
# Encode the mask.
mask_code = long(0)
for idx in indices:
- mask_code += 2 ** idx # Low order, low index.
+ mask_code += 2 ** long(idx) # Low order, low index.
# Encode each condition of truth table.
encoded_sub_table = set()
for condition in conditions:
encoded_condition = long(0)
for idx, state in zip(indices, condition):
- encoded_condition += 2 ** idx if long(state) else 0
+ encoded_condition += 2 ** long(idx) if int(state) else 0
encoded_sub_table.add(encoded_condition)
self._encoded_table.append((mask_code, encoded_sub_table))
| LogicNetwork table encoding issue
See comments on the team_grn slack channel. I'll add more here later. | ELIFE-ASU/Neet | diff --git a/test/test_logic.py b/test/test_logic.py
index 523d2d9..304c019 100644
--- a/test/test_logic.py
+++ b/test/test_logic.py
@@ -2,7 +2,8 @@
# Use of this source code is governed by a MIT
# license that can be found in the LICENSE file.
"""Unit test for LogicNetwork"""
-import unittest
+import unittest, numpy as np
+from neet.python3 import *
from neet.boolean import LogicNetwork
from neet.exceptions import FormatError
@@ -27,6 +28,16 @@ class TestLogicNetwork(unittest.TestCase):
self.assertEqual(['A', 'B'], net.names)
self.assertEqual([(2, {0, 2}), (1, {1})], net._encoded_table)
+ def test_init_long(self):
+ table = [((), set()) for _ in range(65)]
+ table[0] = ((np.int64(64),), set('1'))
+
+ mask = long(2)**64
+
+ net = LogicNetwork(table)
+ self.assertEqual(net.table, table)
+ self.assertEqual(net._encoded_table[0], (mask, set([mask])))
+
def test_inplace_update(self):
net = LogicNetwork([((1,), {'0', '1'}), ((0,), {'1'})])
state = [0, 1]
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"nose-cov",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cov-core==1.15.0
coverage==6.2
decorator==4.4.2
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/ELIFE-ASU/Neet.git@041332432596020896894dbaa66282010db9e065#egg=neet
networkx==2.5.1
nose==1.3.7
nose-cov==1.6
numpy==1.19.5
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyinform==0.2.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: Neet
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- cov-core==1.15.0
- coverage==6.2
- decorator==4.4.2
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- networkx==2.5.1
- nose==1.3.7
- nose-cov==1.6
- numpy==1.19.5
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyinform==0.2.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/Neet
| [
"test/test_logic.py::TestLogicNetwork::test_init_long"
] | [] | [
"test/test_logic.py::TestLogicNetwork::test_has_metadata",
"test/test_logic.py::TestLogicNetwork::test_init",
"test/test_logic.py::TestLogicNetwork::test_inplace_update",
"test/test_logic.py::TestLogicNetwork::test_is_fixed_sized",
"test/test_logic.py::TestLogicNetwork::test_is_network",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read_custom_comment",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read_empty",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read_no_commas",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read_no_header",
"test/test_logic.py::TestLogicNetwork::test_logic_simple_read_no_node_headers",
"test/test_logic.py::TestLogicNetwork::test_neighbors_both",
"test/test_logic.py::TestLogicNetwork::test_neighbors_in",
"test/test_logic.py::TestLogicNetwork::test_neighbors_out",
"test/test_logic.py::TestLogicNetwork::test_node_dependency",
"test/test_logic.py::TestLogicNetwork::test_reduce_table",
"test/test_logic.py::TestLogicNetwork::test_to_networkx_graph_names",
"test/test_logic.py::TestLogicNetwork::test_to_networkx_graph_names_fail",
"test/test_logic.py::TestLogicNetwork::test_to_networkx_metadata",
"test/test_logic.py::TestLogicNetwork::test_update",
"test/test_logic.py::TestLogicNetwork::test_update_exceptions"
] | [] | MIT License | 2,446 | 243 | [
"neet/boolean/logicnetwork.py"
] |
|
G-Node__python-odml-284 | bc4bade4c93e0d5cb3ab8c0fb427fcf3c0ed96e1 | 2018-04-26 14:46:25 | eeff5922987b064681d1328f81af317d8171808f | diff --git a/odml/format.py b/odml/format.py
index bae2d68..7a0a796 100644
--- a/odml/format.py
+++ b/odml/format.py
@@ -130,7 +130,7 @@ class Section(Format):
_args = {
'id': 0,
'type': 1,
- 'name': 0,
+ 'name': 1,
'definition': 0,
'reference': 0,
'link': 0,
diff --git a/odml/property.py b/odml/property.py
index 2602dea..f6d0211 100644
--- a/odml/property.py
+++ b/odml/property.py
@@ -13,7 +13,7 @@ class BaseProperty(base.BaseObject):
"""An odML Property"""
_format = frmt.Property
- def __init__(self, name, value=None, parent=None, unit=None,
+ def __init__(self, name=None, value=None, parent=None, unit=None,
uncertainty=None, reference=None, definition=None,
dependency=None, dependency_value=None, dtype=None,
value_origin=None, id=None):
@@ -58,6 +58,11 @@ class BaseProperty(base.BaseObject):
print(e)
self._id = str(uuid.uuid4())
+ # Use id if no name was provided.
+ if not name:
+ name = self._id
+
+ self._name = name
self._parent = None
self._name = name
self._value_origin = value_origin
@@ -118,6 +123,14 @@ class BaseProperty(base.BaseObject):
@name.setter
def name(self, new_name):
+ if self.name == new_name:
+ return
+
+ curr_parent = self.parent
+ if hasattr(curr_parent, "properties") and new_name in curr_parent.properties:
+
+ raise KeyError("Object with the same name already exists!")
+
self._name = new_name
def __repr__(self):
diff --git a/odml/section.py b/odml/section.py
index fa08c1c..4707003 100644
--- a/odml/section.py
+++ b/odml/section.py
@@ -25,7 +25,7 @@ class BaseSection(base.Sectionable):
_format = format.Section
- def __init__(self, name, type=None, parent=None,
+ def __init__(self, name=None, type=None, parent=None,
definition=None, reference=None,
repository=None, link=None, include=None, id=None):
@@ -42,6 +42,10 @@ class BaseSection(base.Sectionable):
print(e)
self._id = str(uuid.uuid4())
+ # Use id if no name was provided.
+ if not name:
+ name = self._id
+
self._parent = None
self._name = name
self._definition = definition
@@ -94,6 +98,13 @@ class BaseSection(base.Sectionable):
@name.setter
def name(self, new_value):
+ if self.name == new_value:
+ return
+
+ curr_parent = self.parent
+ if hasattr(curr_parent, "sections") and new_value in curr_parent.sections:
+ raise KeyError("Object with the same name already exists!")
+
self._name = new_value
@property
diff --git a/odml/tools/odmlparser.py b/odml/tools/odmlparser.py
index fbc7c71..2edd2e5 100644
--- a/odml/tools/odmlparser.py
+++ b/odml/tools/odmlparser.py
@@ -48,6 +48,10 @@ class ODMLWriter:
raise ParserException(msg)
with open(filename, 'w') as file:
+ # Add XML header to support odML stylesheets.
+ if self.parser == 'XML':
+ file.write(xmlparser.XMLWriter.header)
+
file.write(self.to_string(odml_document))
def to_string(self, odml_document):
diff --git a/odml/tools/xmlparser.py b/odml/tools/xmlparser.py
index f2ea862..c935c99 100644
--- a/odml/tools/xmlparser.py
+++ b/odml/tools/xmlparser.py
@@ -5,11 +5,11 @@ Parses odML files. Can be invoked standalone:
python -m odml.tools.xmlparser file.odml
"""
import csv
+import sys
from lxml import etree as ET
from lxml.builder import E
# this is needed for py2exe to include lxml completely
from lxml import _elementpath as _dummy
-import sys
try:
from StringIO import StringIO
@@ -118,10 +118,9 @@ class XMLWriter:
else:
data = str(self)
- f = open(filename, "w")
- f.write(self.header)
- f.write(data)
- f.close()
+ with open(filename, "w") as file:
+ file.write(self.header)
+ file.write(data)
def load(filename):
@@ -223,18 +222,20 @@ class XMLReader(object):
return None # won't be able to parse this one
return getattr(self, "parse_" + node.tag)(node, self.tags[node.tag])
- def parse_tag(self, root, fmt, insert_children=True, create=None):
+ def parse_tag(self, root, fmt, insert_children=True):
"""
Parse an odml node based on the format description *fmt*
- and a function *create* to instantiate a corresponding object
+ and instantiate the corresponding object.
+ :param root: lxml.etree node containing an odML object or object tree.
+ :param fmt: odML class corresponding to the content of the root node.
+ :param insert_children: Bool value. When True, child elements of the root node
+ will be parsed to their odML equivalents and appended to
+ the odML document. When False, child elements of the
+ root node will be ignored.
"""
arguments = {}
extra_args = {}
children = []
- text = []
-
- if root.text:
- text.append(root.text.strip())
for k, v in root.attrib.iteritems():
k = k.lower()
@@ -258,8 +259,6 @@ class XMLReader(object):
else:
tag = fmt.map(node.tag)
if tag in arguments:
- # TODO make this an error, however first figure out a
- # way to let <odML version=><version/> pass
self.warn("Element <%s> is given multiple times in "
"<%s> tag" % (node.tag, root.tag), node)
@@ -273,38 +272,21 @@ class XMLReader(object):
else:
self.error("Invalid element <%s> in odML document section <%s>"
% (node.tag, root.tag), node)
- if node.tail:
- text.append(node.tail.strip())
if sys.version_info > (3,):
- self.check_mandatory_arguments(dict(list(arguments.items()) +
- list(extra_args.items())),
- fmt, root.tag, root)
+ check_args = dict(list(arguments.items()) + list(extra_args.items()))
else:
- self.check_mandatory_arguments(dict(arguments.items() +
- extra_args.items()),
- fmt, root.tag, root)
- if create is None:
- obj = fmt.create()
- else:
- obj = create(args=arguments, text=''.join(text), children=children)
+ check_args = dict(arguments.items() + extra_args.items())
- for k, v in arguments.items():
- if hasattr(obj, k) and (getattr(obj, k) is None or k == 'id'):
- try:
- if k == 'id' and v is not None:
- obj._id = v
- else:
- setattr(obj, k, v)
- except Exception as e:
- self.warn("cannot set '%s' property on <%s>: %s" %
- (k, root.tag, repr(e)), root)
- if not self.ignore_errors:
- raise e
+ self.check_mandatory_arguments(check_args, fmt, root.tag, root)
+
+ # Instantiate the current odML object with the parsed attributes.
+ obj = fmt.create(**arguments)
if insert_children:
for child in children:
obj.append(child)
+
return obj
def parse_odML(self, root, fmt):
@@ -312,24 +294,10 @@ class XMLReader(object):
return doc
def parse_section(self, root, fmt):
- name = root.get("name") # property name= overrides
- if name is None: # the element
- name_node = root.find("name")
- if name_node is not None:
- name = name_node.text
- root.remove(name_node)
- # delete the name_node so its value won't
- # be used to overwrite the already set name-attribute
-
- if name is None:
- self.error("Missing name element in <section>", root)
-
- return self.parse_tag(root, fmt,
- create=lambda **kargs: fmt.create(name))
+ return self.parse_tag(root, fmt)
def parse_property(self, root, fmt):
- create = lambda children, args, **kargs: fmt.create(**args)
- return self.parse_tag(root, fmt, insert_children=False, create=create)
+ return self.parse_tag(root, fmt, insert_children=False)
if __name__ == '__main__':
| odML Format update
Define Section `name` and `type` as well as Property `name` as required in `format.py`. | G-Node/python-odml | diff --git a/test/test_property.py b/test/test_property.py
index 9138cae..9eafedb 100644
--- a/test/test_property.py
+++ b/test/test_property.py
@@ -327,6 +327,40 @@ class TestProperty(unittest.TestCase):
assert(p.dtype == 'string')
assert(p.value == ['7', '20', '1 Dog', 'Seven'])
+ def test_name(self):
+ # Test id is used when name is not provided
+ p = Property()
+ self.assertIsNotNone(p.name)
+ self.assertEqual(p.name, p.id)
+
+ # Test name is properly set on init
+ name = "rumpelstilzchen"
+ p = Property(name)
+ self.assertEqual(p.name, name)
+
+ # Test name can be properly set on single and connected Properties
+ prop = Property()
+ self.assertNotEqual(prop.name, "prop")
+ prop.name = "prop"
+ self.assertEqual(prop.name, "prop")
+
+ sec = Section()
+ prop_a = Property(parent=sec)
+ self.assertNotEqual(prop_a.name, "prop_a")
+ prop_a.name = "prop_a"
+ self.assertEqual(prop_a.name, "prop_a")
+
+ # Test property name can be changed with siblings
+ prop_b = Property(name="prop_b", parent=sec)
+ self.assertEqual(prop_b.name, "prop_b")
+ prop_b.name = "prop"
+ self.assertEqual(prop_b.name, "prop")
+
+ # Test property name set will fail on existing sibling with same name
+ with self.assertRaises(KeyError):
+ prop_b.name = "prop_a"
+ self.assertEqual(prop_b.name, "prop")
+
def test_parent(self):
p = Property("property_section", parent=Section("S"))
self.assertIsInstance(p.parent, BaseSection)
diff --git a/test/test_section.py b/test/test_section.py
index 84604aa..5581928 100644
--- a/test/test_section.py
+++ b/test/test_section.py
@@ -39,6 +39,50 @@ class TestSection(unittest.TestCase):
sec.definition = ""
self.assertIsNone(sec.definition)
+ def test_name(self):
+ # Test id is used when name is not provided
+ s = Section()
+ self.assertIsNotNone(s.name)
+ self.assertEqual(s.name, s.id)
+
+ # Test name is properly set on init
+ name = "rumpelstilzchen"
+ s = Section(name)
+ self.assertEqual(s.name, name)
+
+ name = "rumpelstilzchen"
+ s = Section(name=name)
+ self.assertEqual(s.name, name)
+
+ # Test name can be properly set on single and connected Sections
+ sec = Section()
+ self.assertNotEqual(sec.name, "sec")
+ sec.name = "sec"
+ self.assertEqual(sec.name, "sec")
+
+ subsec_a = Section(parent=sec)
+ self.assertNotEqual(subsec_a.name, "subsec_a")
+ subsec_a.name = "subsec_a"
+ self.assertEqual(subsec_a.name, "subsec_a")
+
+ # Test subsection name can be changed with siblings
+ subsec_b = Section(name="subsec_b", parent=sec)
+ self.assertEqual(subsec_b.name, "subsec_b")
+ subsec_b.name = "subsec"
+ self.assertEqual(subsec_b.name, "subsec")
+
+ # Test subsection name set will fail on existing sibling with same name
+ with self.assertRaises(KeyError):
+ subsec_b.name = "subsec_a"
+ self.assertEqual(subsec_b.name, "subsec")
+
+ # Test section name set will fail on existing same name document sibling
+ doc = Document()
+ sec_a = Section(name="a", parent=doc)
+ sec_b = Section(name="b", parent=doc)
+ with self.assertRaises(KeyError):
+ sec_b.name = "a"
+
def test_parent(self):
s = Section("Section")
self.assertIsNone(s.parent)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 3
},
"num_modified_files": 5
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y libxml2-dev libxslt1-dev lib32z1-dev"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
isodate==0.7.2
lxml==5.3.1
-e git+https://github.com/G-Node/python-odml.git@bc4bade4c93e0d5cb3ab8c0fb427fcf3c0ed96e1#egg=odML
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pyparsing==3.2.3
pytest @ file:///croot/pytest_1738938843180/work
PyYAML==6.0.2
rdflib==7.1.4
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: python-odml
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- isodate==0.7.2
- lxml==5.3.1
- pyparsing==3.2.3
- pyyaml==6.0.2
- rdflib==7.1.4
prefix: /opt/conda/envs/python-odml
| [
"test/test_property.py::TestProperty::test_name",
"test/test_section.py::TestSection::test_name"
] | [] | [
"test/test_property.py::TestProperty::test_bool_conversion",
"test/test_property.py::TestProperty::test_clone",
"test/test_property.py::TestProperty::test_dtype",
"test/test_property.py::TestProperty::test_get_merged_equivalent",
"test/test_property.py::TestProperty::test_get_path",
"test/test_property.py::TestProperty::test_get_set_value",
"test/test_property.py::TestProperty::test_id",
"test/test_property.py::TestProperty::test_merge",
"test/test_property.py::TestProperty::test_merge_check",
"test/test_property.py::TestProperty::test_new_id",
"test/test_property.py::TestProperty::test_parent",
"test/test_property.py::TestProperty::test_simple_attributes",
"test/test_property.py::TestProperty::test_str_to_int_convert",
"test/test_property.py::TestProperty::test_value",
"test/test_property.py::TestProperty::test_value_append",
"test/test_property.py::TestProperty::test_value_extend",
"test/test_section.py::TestSection::test_append",
"test/test_section.py::TestSection::test_children",
"test/test_section.py::TestSection::test_clone",
"test/test_section.py::TestSection::test_contains",
"test/test_section.py::TestSection::test_extend",
"test/test_section.py::TestSection::test_id",
"test/test_section.py::TestSection::test_include",
"test/test_section.py::TestSection::test_insert",
"test/test_section.py::TestSection::test_link",
"test/test_section.py::TestSection::test_merge",
"test/test_section.py::TestSection::test_merge_check",
"test/test_section.py::TestSection::test_new_id",
"test/test_section.py::TestSection::test_parent",
"test/test_section.py::TestSection::test_path",
"test/test_section.py::TestSection::test_remove",
"test/test_section.py::TestSection::test_reorder",
"test/test_section.py::TestSection::test_repository",
"test/test_section.py::TestSection::test_simple_attributes",
"test/test_section.py::TestSection::test_unmerge"
] | [] | BSD 4-Clause "Original" or "Old" License | 2,451 | 2,231 | [
"odml/format.py",
"odml/property.py",
"odml/section.py",
"odml/tools/odmlparser.py",
"odml/tools/xmlparser.py"
] |
|
pydicom__pydicom-633 | fcc63f0b96fb370b0eb60b2c765b469ce62e597c | 2018-04-26 18:41:30 | fcc63f0b96fb370b0eb60b2c765b469ce62e597c | scaramallion: Looks good. | diff --git a/pydicom/filewriter.py b/pydicom/filewriter.py
index 797439608..f15749508 100644
--- a/pydicom/filewriter.py
+++ b/pydicom/filewriter.py
@@ -456,6 +456,8 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
Attempt to correct ambiguous VR elements when explicit little/big
encoding Elements that can't be corrected will be returned unchanged.
"""
+ _harmonize_properties(dataset, fp)
+
if not fp.is_implicit_VR and not dataset.is_original_encoding:
dataset = correct_ambiguous_vr(dataset, fp.is_little_endian)
@@ -475,6 +477,22 @@ def write_dataset(fp, dataset, parent_encoding=default_encoding):
return fp.tell() - fpStart
+def _harmonize_properties(dataset, fp):
+ """Make sure the properties in the dataset and the file pointer are
+ consistent, so the user can set both with the same effect.
+ Properties set on the destination file object always have preference.
+ """
+ # ensure preference of fp over dataset
+ if hasattr(fp, 'is_little_endian'):
+ dataset.is_little_endian = fp.is_little_endian
+ if hasattr(fp, 'is_implicit_VR'):
+ dataset.is_implicit_VR = fp.is_implicit_VR
+
+ # write the properties back to have a consistent state
+ fp.is_implicit_VR = dataset.is_implicit_VR
+ fp.is_little_endian = dataset.is_little_endian
+
+
def write_sequence(fp, data_element, encoding):
"""Write a dicom Sequence contained in data_element to the file fp."""
# write_data_element has already written the VR='SQ' (if needed) and
| Write failure with implicit -> explicit VR
```python
>>> from pydicom import dcmread
>>> from pydicom.filebase import DicomBytesIO
>>> from pydicom.filewriter import write_dataset
>>> ds = dcmread('dicom_files/RTImageStorage.dcm')
>>> fp = DicomBytesIO()
>>> fp.is_little_endian = True
>>> fp.is_implicit_VR = False
>>> write_dataset(fp, ds)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/.../pydicom/pydicom/filewriter.py", line 473, in write_dataset
write_data_element(fp, dataset.get_item(tag), dataset_encoding)
File "/usr/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/.../pydicom/pydicom/tag.py", line 37, in tag_in_exception
raise type(ex)(msg)
TypeError: With tag (0008, 0008) got exception: object of type 'NoneType' has no len()
Traceback (most recent call last):
File "/.../pydicom/pydicom/tag.py", line 30, in tag_in_exception
yield
File "/.../pydicom/pydicom/filewriter.py", line 473, in write_dataset
write_data_element(fp, dataset.get_item(tag), dataset_encoding)
File "/.../pydicom/pydicom/filewriter.py", line 384, in write_data_element
if len(VR) != 2:
TypeError: object of type 'NoneType' has no len()
```
Probably related to the #616 PR @mrbean-bremen
| pydicom/pydicom | diff --git a/pydicom/tests/test_filewriter.py b/pydicom/tests/test_filewriter.py
index 464d6b172..4b943d651 100644
--- a/pydicom/tests/test_filewriter.py
+++ b/pydicom/tests/test_filewriter.py
@@ -1129,6 +1129,46 @@ class TestWriteToStandard(object):
for elem_in, elem_out in zip(ds_explicit, ds_out):
assert elem_in == elem_out
+ def test_write_dataset(self):
+ # make sure writing and reading back a dataset works correctly
+ ds = dcmread(mr_implicit_name)
+ fp = DicomBytesIO()
+ write_dataset(fp, ds)
+ fp.seek(0)
+ ds_read = read_dataset(fp, is_implicit_VR=True, is_little_endian=True)
+ for elem_orig, elem_read in zip(ds_read, ds):
+ assert elem_orig == elem_read
+
+ def test_write_dataset_with_explicit_vr(self):
+ # make sure conversion from implicit to explicit VR does not
+ # raise (regression test for #632)
+ ds = dcmread(mr_implicit_name)
+ fp = DicomBytesIO()
+ fp.is_implicit_VR = False
+ fp.is_little_endian = True
+ write_dataset(fp, ds)
+ fp.seek(0)
+ ds_read = read_dataset(fp, is_implicit_VR=False, is_little_endian=True)
+ for elem_orig, elem_read in zip(ds_read, ds):
+ assert elem_orig == elem_read
+
+ def test_convert_implicit_to_explicit_vr_using_destination(self):
+ # make sure conversion from implicit to explicit VR works
+ # if setting the property in the destination
+ ds = dcmread(mr_implicit_name)
+ ds.is_implicit_VR = False
+ ds.file_meta.TransferSyntaxUID = '1.2.840.10008.1.2.1'
+ fp = DicomBytesIO()
+ fp.is_implicit_VR = False
+ fp.is_little_endian = True
+ ds.save_as(fp, write_like_original=False)
+ fp.seek(0)
+ ds_out = dcmread(fp)
+ ds_explicit = dcmread(mr_name)
+
+ for elem_in, elem_out in zip(ds_explicit, ds_out):
+ assert elem_in == elem_out
+
def test_convert_explicit_to_implicit_vr(self):
# make sure conversion from explicit to implicit VR works
# without private tags
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
-e git+https://github.com/pydicom/pydicom.git@fcc63f0b96fb370b0eb60b2c765b469ce62e597c#egg=pydicom
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: pydicom
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/pydicom
| [
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_dataset"
] | [
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raw_elements_preserved_implicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raw_elements_preserved_explicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_changed_character_set"
] | [
"pydicom/tests/test_filewriter.py::WriteFileTests::testCT",
"pydicom/tests/test_filewriter.py::WriteFileTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::WriteFileTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMR",
"pydicom/tests/test_filewriter.py::WriteFileTests::testMultiPN",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTDose",
"pydicom/tests/test_filewriter.py::WriteFileTests::testRTPlan",
"pydicom/tests/test_filewriter.py::WriteFileTests::testUnicode",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_ffff_ffff",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::WriteFileTests::test_write_removes_grouplength",
"pydicom/tests/test_filewriter.py::WriteFileTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testCT",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testJPEG2000",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testListItemWriteBack",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMR",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testMultiPN",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTDose",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testRTPlan",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testUnicode",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_multivalue_DA",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_double_filemeta",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_ffff_ffff",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_no_ts",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::test_write_removes_grouplength",
"pydicom/tests/test_filewriter.py::ScratchWriteDateTimeTests::testwrite_short_uid",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_empty_AT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OD_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_OL_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UC_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UN_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_explicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_UR_implicit_little",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_empty_LO",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DA",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_DT",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_multi_TM",
"pydicom/tests/test_filewriter.py::WriteDataElementTests::test_write_unknown_vr_raises",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_lut_descriptor",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_overlay",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_data",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_one",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_pixel_representation_vm_three",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_sequence",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVR::test_waveform_bits_allocated",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_not_ambiguous_raw_data_element",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_correct_ambiguous_data_element",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_correct_ambiguous_raw_data_element",
"pydicom/tests/test_filewriter.py::TestCorrectAmbiguousVRElement::test_pixel_data_not_ow_or_ob",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_big_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_little_endian",
"pydicom/tests/test_filewriter.py::WriteAmbiguousVRTests::test_write_explicit_vr_raises",
"pydicom/tests/test_filewriter.py::ScratchWriteTests::testImpl_LE_deflen_write",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_none_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_bad_preamble",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_prefix_none",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_ds_changed",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_implicit_to_explicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_dataset_with_explicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_implicit_to_explicit_vr_using_destination",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_explicit_to_implicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_big_to_little_endian",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_little_to_big_endian",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_private_tag_vr_from_implicit_data",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_convert_rgb_from_implicit_to_explicit_vr",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_transfer_syntax_raises",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_media_storage_sop_class_uid_added",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_write_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_raise_no_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_add_file_meta",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_standard",
"pydicom/tests/test_filewriter.py::TestWriteToStandard::test_commandset_no_written",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_version",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_version_name_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_implementation_class_uid_length",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoToStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_ds_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_file_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_no_preamble",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_commandset_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_custom",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_default",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_preamble_filemeta_dataset",
"pydicom/tests/test_filewriter.py::TestWriteNonStandard::test_read_write_identical",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_bad_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_filelike_position",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_group_length_updated",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_meta_unchanged",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_missing_elements",
"pydicom/tests/test_filewriter.py::TestWriteFileMetaInfoNonStandard::test_transfer_syntax_not_added",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_empty_value",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_list",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_singleton",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_exception",
"pydicom/tests/test_filewriter.py::TestWriteNumbers::test_write_big_endian",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding_unicode",
"pydicom/tests/test_filewriter.py::TestWritePN::test_no_encoding",
"pydicom/tests/test_filewriter.py::TestWriteDT::test_format_dt",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_big_endian_incorrect_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_correct_data",
"pydicom/tests/test_filewriter.py::TestWriteUndefinedLengthPixelData::test_little_endian_incorrect_data"
] | [] | MIT License | 2,452 | 414 | [
"pydicom/filewriter.py"
] |
sangoma__ursine-12 | b9523c22a724b42e84e2e3093cb02b801e03fa70 | 2018-04-27 21:56:42 | b9523c22a724b42e84e2e3093cb02b801e03fa70 | diff --git a/ursine/uri.py b/ursine/uri.py
index e7feacc..57be602 100644
--- a/ursine/uri.py
+++ b/ursine/uri.py
@@ -200,6 +200,16 @@ class URI:
else:
return uri
+ def short_uri(self):
+ if self.user and self.password:
+ user = f'{self.user}:{self.password}@'
+ elif self.user:
+ user = f'{self.user}@'
+ else:
+ user = ''
+
+ return f'{self.scheme}:{user}{self.host}:{self.port}'
+
def __repr__(self):
return f'{self.__class__.__name__}({self})'
| Missing `short_uri`
aiosip needs this method to generate URI addresses. | sangoma/ursine | diff --git a/tests/test_uri.py b/tests/test_uri.py
index d74630d..36a8806 100644
--- a/tests/test_uri.py
+++ b/tests/test_uri.py
@@ -29,6 +29,19 @@ def test_to_str(uri, expect):
assert str(URI(uri)) == expect
[email protected]('uri,expect', [
+ ('sip:localhost', 'sip:localhost:5060'),
+ ('sips:localhost', 'sips:localhost:5061'),
+ ('<sip:localhost>', 'sip:localhost:5060'),
+ (
+ 'John Doe <sip:localhost:5080?x=y&a=b>',
+ 'sip:localhost:5080',
+ )
+])
+def test_to_short_uri(uri, expect):
+ assert URI(uri).short_uri() == expect
+
+
@pytest.mark.parametrize('uri,expect', [
('sip:localhost', 'URI(sip:localhost:5060;transport=udp)'),
('sips:localhost', 'URI(sips:localhost:5061;transport=tcp)'),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 0.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"Pipfile"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
multidict==5.2.0
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
-e git+https://github.com/sangoma/ursine.git@b9523c22a724b42e84e2e3093cb02b801e03fa70#egg=ursine
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: ursine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- multidict==5.2.0
prefix: /opt/conda/envs/ursine
| [
"tests/test_uri.py::test_to_short_uri[sip:localhost-sip:localhost:5060]",
"tests/test_uri.py::test_to_short_uri[sips:localhost-sips:localhost:5061]",
"tests/test_uri.py::test_to_short_uri[<sip:localhost>-sip:localhost:5060]",
"tests/test_uri.py::test_to_short_uri[John"
] | [] | [
"tests/test_uri.py::test_invalid[sip:localhost:port]",
"tests/test_uri.py::test_invalid[sip:localhost:0]",
"tests/test_uri.py::test_invalid[sip:localhost:70000]",
"tests/test_uri.py::test_invalid[sip:localhost?]",
"tests/test_uri.py::test_invalid[sip:localhost;]",
"tests/test_uri.py::test_invalid[sip:localhost&]",
"tests/test_uri.py::test_to_str[sip:localhost-sip:localhost:5060;transport=udp]",
"tests/test_uri.py::test_to_str[sips:localhost-sips:localhost:5061;transport=tcp]",
"tests/test_uri.py::test_to_str[<sip:localhost>-sip:localhost:5060;transport=udp]",
"tests/test_uri.py::test_to_str[John",
"tests/test_uri.py::test_repr[sip:localhost-URI(sip:localhost:5060;transport=udp)]",
"tests/test_uri.py::test_repr[sips:localhost-URI(sips:localhost:5061;transport=tcp)]",
"tests/test_uri.py::test_equality[sip:localhost-sip:localhost]",
"tests/test_uri.py::test_equality[sip:localhost-sip:localhost;transport=udp]",
"tests/test_uri.py::test_equality[<sip:localhost>-sip:localhost]",
"tests/test_uri.py::test_equality[Alice",
"tests/test_uri.py::test_equality[SIP:localhost-sip:localhost]",
"tests/test_uri.py::test_equality[<sip:localhost>;tag=foo-sip:localhost;tag=foo]",
"tests/test_uri.py::test_equality[<sip:localhost>",
"tests/test_uri.py::test_inequality[sip:localhost-sips:localhost]",
"tests/test_uri.py::test_inequality[Bob",
"tests/test_uri.py::test_inequality[Alice",
"tests/test_uri.py::test_inequality[sip:remotehost-sip:localhost]",
"tests/test_uri.py::test_build[kwargs0-sip:localhost]",
"tests/test_uri.py::test_build[kwargs1-sip:localhost;transport=tcp]",
"tests/test_uri.py::test_build[kwargs2-sips:[::1]:5080;maddr=[::dead:beef]?x=y&a=]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-user-jdoe-sip:jdoe@localhost]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost;transport=tcp-scheme-sips-sips:localhost:5060]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-port-5080-sip:localhost:5080]",
"tests/test_uri.py::test_modified_uri_creation[sip:jdoe@localhost-user-None-sip:localhost]",
"tests/test_uri.py::test_modified_uri_creation[\"Mark\"",
"tests/test_uri.py::test_modified_uri_creation[sip:user:pass@localhost-user-None-sip:localhost]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-user-user:pass-sip:user:pass@localhost]",
"tests/test_uri.py::test_modified_uri_creation[sip:alice@localhost-password-pass-sip:alice:pass@localhost]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-transport-tcp-sip:localhost;transport=tcp]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-tag-bler-sip:localhost;transport=udp;tag=bler]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-parameters-new10-sip:localhost;maddr=[::1];foo=bar;x=]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-headers-new11-sip:localhost?ahhhh=&foo=bar]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-parameters-new12-sip:localhost;maddr=[::1];foo=bar;x=]",
"tests/test_uri.py::test_modified_uri_creation[sip:localhost-headers-new13-sip:localhost?ahhhh=&foo=bar]",
"tests/test_uri.py::test_tag_generation[sip:localhost-None-None]",
"tests/test_uri.py::test_tag_generation[sip:localhost-5654-5654]",
"tests/test_uri.py::test_tag_generation[sip:localhost;tag=2000-5654-5654]",
"tests/test_uri.py::test_tag_generation[sip:localhost;tag=2ace-None-2ace]"
] | [] | Apache License 2.0 | 2,458 | 176 | [
"ursine/uri.py"
] |
|
numpy__numpydoc-175 | 8f1ac50a7267e9e1ee66141fd71561c2ca2dc713 | 2018-05-02 22:37:28 | 1f197e32a31db2280b71be183e6724f9457ce78e | timhoffm: Note: CI currently fails because of pip changes. Should be fixed by #174.
jnothman: if you've not had to modify any tests, how do we know this affects output?
timhoffm: As said above, I didn't run any tests so far myself. It's apparent that one parser for "Parameters" and "Returns" cannot get both right. I'm confident that the proposed code change itself is correct. What has to be still shown is that the calling code and tests work with that (it might be that they partly compensate for the original bug). I thought I'd use the tests in CI for that, but CI is already prematurely failing for different reasons.
timhoffm: Rebased onto master.
*Note:* The tests do currently fail. Waiting for #176 before doing any further changes to fix tests.
timhoffm: PR updated.
Single element returns params such as:
~~~
Returns
-------
int
The return value.
~~~
were detected as names. I.e. `int` was considered a name. This logical error has been fixed such that `int` is now a type and the name is empty.
As a consequence, `int` is not formatted bold anymore. This is consistent with the formatting of types in patterns like `x : int` and a prerequisite for type references like ``:class:`MyClass` `` to work in this position.
larsoner: @timhoffm can you rebase? Then I can take a look and hopefully merge
rgommers: I've taken the liberty of fixing the merge conflict. The only nontrivial change was deciding where the new heading `Receives` goes; I added it to `'Returns', 'Yields', 'Raises', 'Warns'`. | diff --git a/numpydoc/docscrape.py b/numpydoc/docscrape.py
index 02afd88..32245a9 100644
--- a/numpydoc/docscrape.py
+++ b/numpydoc/docscrape.py
@@ -220,7 +220,7 @@ class NumpyDocString(Mapping):
else:
yield name, self._strip(data[2:])
- def _parse_param_list(self, content):
+ def _parse_param_list(self, content, single_element_is_type=False):
r = Reader(content)
params = []
while not r.eof():
@@ -228,7 +228,10 @@ class NumpyDocString(Mapping):
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
- arg_name, arg_type = header, ''
+ if single_element_is_type:
+ arg_name, arg_type = '', header
+ else:
+ arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
@@ -393,10 +396,12 @@ class NumpyDocString(Mapping):
self._error_location("The section %s appears twice"
% section)
- if section in ('Parameters', 'Returns', 'Yields', 'Receives',
- 'Raises', 'Warns', 'Other Parameters', 'Attributes',
+ if section in ('Parameters', 'Other Parameters', 'Attributes',
'Methods'):
self[section] = self._parse_param_list(content)
+ elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):
+ self[section] = self._parse_param_list(
+ content, single_element_is_type=True)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
@@ -452,10 +457,12 @@ class NumpyDocString(Mapping):
if self[name]:
out += self._str_header(name)
for param in self[name]:
+ parts = []
+ if param.name:
+ parts.append(param.name)
if param.type:
- out += ['%s : %s' % (param.name, param.type)]
- else:
- out += [param.name]
+ parts.append(param.type)
+ out += [' : '.join(parts)]
if param.desc and ''.join(param.desc).strip():
out += self._str_indent(param.desc)
out += ['']
@@ -637,7 +644,7 @@ class ClassDoc(NumpyDocString):
if _members is ALL:
_members = None
_exclude = config.get('exclude-members', [])
-
+
if config.get('show_class_members', True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
@@ -649,7 +656,7 @@ class ClassDoc(NumpyDocString):
if not self[field]:
doc_list = []
for name in sorted(items):
- if (name in _exclude or
+ if (name in _exclude or
(_members and name not in _members)):
continue
try:
diff --git a/numpydoc/docscrape_sphinx.py b/numpydoc/docscrape_sphinx.py
index 9b23235..aad64c7 100644
--- a/numpydoc/docscrape_sphinx.py
+++ b/numpydoc/docscrape_sphinx.py
@@ -70,19 +70,19 @@ class SphinxDocString(NumpyDocString):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
- typed_fmt = '**%s** : %s'
- untyped_fmt = '**%s**'
+ named_fmt = '**%s** : %s'
+ unnamed_fmt = '%s'
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param in self[name]:
- if param.type:
- out += self._str_indent([typed_fmt % (param.name.strip(),
+ if param.name:
+ out += self._str_indent([named_fmt % (param.name.strip(),
param.type)])
else:
- out += self._str_indent([untyped_fmt % param.name.strip()])
+ out += self._str_indent([unnamed_fmt % param.type.strip()])
if not param.desc:
out += self._str_indent(['..'], 8)
else:
@@ -209,12 +209,13 @@ class SphinxDocString(NumpyDocString):
display_param, desc = self._process_param(param.name,
param.desc,
fake_autosummary)
-
+ parts = []
+ if display_param:
+ parts.append(display_param)
if param.type:
- out += self._str_indent(['%s : %s' % (display_param,
- param.type)])
- else:
- out += self._str_indent([display_param])
+ parts.append(param.type)
+ out += self._str_indent([' : '.join(parts)])
+
if desc and self.use_blockquotes:
out += ['']
elif not desc:
@@ -376,8 +377,8 @@ class SphinxDocString(NumpyDocString):
'yields': self._str_returns('Yields'),
'receives': self._str_returns('Receives'),
'other_parameters': self._str_param_list('Other Parameters'),
- 'raises': self._str_param_list('Raises'),
- 'warns': self._str_param_list('Warns'),
+ 'raises': self._str_returns('Raises'),
+ 'warns': self._str_returns('Warns'),
'warnings': self._str_warnings(),
'see_also': self._str_see_also(func_role),
'notes': self._str_section('Notes'),
| Anonymous return values have their types populated in the name slot of the tuple.
I noticed an inconsistency, when using numpydoc version 0.6.0 in python2.7 on Ubuntu. The parsed return section information returns different styles of tuple depending on if the return value is anoymous or not.
Here is a minimal working example:
```python
def mwe():
from numpydoc.docscrape import NumpyDocString
docstr = (
'Returns\n'
'----------\n'
'int\n'
' can return an anoymous integer\n'
'out : ndarray\n'
' can return a named value\n'
)
doc = NumpyDocString(docstr)
returns = doc._parsed_data['Returns']
print(returns)
```
This results in
```python
[(u'int', '', [u'can return an anoymous integer']),
(u'out', u'ndarray', [u'can return a named value'])]
```
However judging by tests (due to lack of docs), I believe it was indented that each value in the returns list should be a tuple of `(arg, arg_type, arg_desc)`. Therefore we should see this instead:
```python
[('', u'int', [u'can return an anoymous integer']),
(u'out', u'ndarray', [u'can return a named value'])]
```
My current workaround is this:
```python
for p_name, p_type, p_descr in returns:
if not p_type:
p_name = ''
p_type = p_name
```
| numpy/numpydoc | diff --git a/numpydoc/tests/test_docscrape.py b/numpydoc/tests/test_docscrape.py
index b4b7e03..e5e3f1f 100644
--- a/numpydoc/tests/test_docscrape.py
+++ b/numpydoc/tests/test_docscrape.py
@@ -211,14 +211,14 @@ def test_returns():
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
- assert arg == 'list of str'
- assert arg_type == ''
+ assert arg == ''
+ assert arg_type == 'list of str'
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
arg, arg_type, desc = doc['Returns'][2]
- assert arg == 'no_description'
- assert arg_type == ''
+ assert arg == ''
+ assert arg_type == 'no_description'
assert not ''.join(desc).strip()
@@ -227,7 +227,7 @@ def test_yields():
assert len(section) == 3
truth = [('a', 'int', 'apples.'),
('b', 'int', 'bananas.'),
- ('int', '', 'unknowns.')]
+ ('', 'int', 'unknowns.')]
for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth):
assert arg == arg_
assert arg_type == arg_type_
@@ -594,11 +594,11 @@ of the one-dimensional normal distribution to higher dimensions.
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
- **list of str**
+ list of str
This is not a real return value. It exists to test
anonymous return values.
- **no_description**
+ no_description
..
:Other Parameters:
@@ -608,12 +608,12 @@ of the one-dimensional normal distribution to higher dimensions.
:Raises:
- **RuntimeError**
+ RuntimeError
Some error
:Warns:
- **RuntimeWarning**
+ RuntimeWarning
Some warning
.. warning::
@@ -687,7 +687,7 @@ def test_sphinx_yields_str():
**b** : int
The number of bananas.
- **int**
+ int
The number of unknowns.
""")
@@ -754,16 +754,18 @@ doc5 = NumpyDocString(
def test_raises():
assert len(doc5['Raises']) == 1
- name, _, desc = doc5['Raises'][0]
- assert name == 'LinAlgException'
- assert desc == ['If array is singular.']
+ param = doc5['Raises'][0]
+ assert param.name == ''
+ assert param.type == 'LinAlgException'
+ assert param.desc == ['If array is singular.']
def test_warns():
assert len(doc5['Warns']) == 1
- name, _, desc = doc5['Warns'][0]
- assert name == 'SomeWarning'
- assert desc == ['If needed']
+ param = doc5['Warns'][0]
+ assert param.name == ''
+ assert param.type == 'SomeWarning'
+ assert param.desc == ['If needed']
def test_see_also():
@@ -995,7 +997,7 @@ def test_use_blockquotes():
GHI
- **JKL**
+ JKL
MNO
''')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 3
},
"num_modified_files": 2
} | 0.8 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc texlive texlive-latex-extra latexmk"
],
"python": "3.9",
"reqs_path": [
"doc/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
certifi==2025.1.31
charset-normalizer==3.4.1
contourpy==1.3.0
cycler==0.12.1
docutils==0.21.2
exceptiongroup==1.2.2
fonttools==4.56.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
importlib_resources==6.5.2
iniconfig==2.1.0
Jinja2==3.1.6
kiwisolver==1.4.7
MarkupSafe==3.0.2
matplotlib==3.9.4
numpy==2.0.2
-e git+https://github.com/numpy/numpydoc.git@8f1ac50a7267e9e1ee66141fd71561c2ca2dc713#egg=numpydoc
packaging==24.2
pillow==11.1.0
pluggy==1.5.0
Pygments==2.19.1
pyparsing==3.2.3
pytest==8.3.5
python-dateutil==2.9.0.post0
requests==2.32.3
six==1.17.0
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
urllib3==2.3.0
zipp==3.21.0
| name: numpydoc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- contourpy==1.3.0
- cycler==0.12.1
- docutils==0.21.2
- exceptiongroup==1.2.2
- fonttools==4.56.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- importlib-resources==6.5.2
- iniconfig==2.1.0
- jinja2==3.1.6
- kiwisolver==1.4.7
- markupsafe==3.0.2
- matplotlib==3.9.4
- numpy==2.0.2
- packaging==24.2
- pillow==11.1.0
- pluggy==1.5.0
- pygments==2.19.1
- pyparsing==3.2.3
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- requests==2.32.3
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- urllib3==2.3.0
- zipp==3.21.0
prefix: /opt/conda/envs/numpydoc
| [
"numpydoc/tests/test_docscrape.py::test_returns",
"numpydoc/tests/test_docscrape.py::test_yields",
"numpydoc/tests/test_docscrape.py::test_sphinx_str",
"numpydoc/tests/test_docscrape.py::test_sphinx_yields_str",
"numpydoc/tests/test_docscrape.py::test_raises",
"numpydoc/tests/test_docscrape.py::test_warns",
"numpydoc/tests/test_docscrape.py::test_use_blockquotes"
] | [] | [
"numpydoc/tests/test_docscrape.py::test_signature",
"numpydoc/tests/test_docscrape.py::test_summary",
"numpydoc/tests/test_docscrape.py::test_extended_summary",
"numpydoc/tests/test_docscrape.py::test_parameters",
"numpydoc/tests/test_docscrape.py::test_other_parameters",
"numpydoc/tests/test_docscrape.py::test_sent",
"numpydoc/tests/test_docscrape.py::test_returnyield",
"numpydoc/tests/test_docscrape.py::test_section_twice",
"numpydoc/tests/test_docscrape.py::test_notes",
"numpydoc/tests/test_docscrape.py::test_references",
"numpydoc/tests/test_docscrape.py::test_examples",
"numpydoc/tests/test_docscrape.py::test_index",
"numpydoc/tests/test_docscrape.py::test_str",
"numpydoc/tests/test_docscrape.py::test_yield_str",
"numpydoc/tests/test_docscrape.py::test_receives_str",
"numpydoc/tests/test_docscrape.py::test_no_index_in_str",
"numpydoc/tests/test_docscrape.py::test_parameters_without_extended_description",
"numpydoc/tests/test_docscrape.py::test_escape_stars",
"numpydoc/tests/test_docscrape.py::test_empty_extended_summary",
"numpydoc/tests/test_docscrape.py::test_see_also",
"numpydoc/tests/test_docscrape.py::test_see_also_parse_error",
"numpydoc/tests/test_docscrape.py::test_see_also_print",
"numpydoc/tests/test_docscrape.py::test_unknown_section",
"numpydoc/tests/test_docscrape.py::test_empty_first_line",
"numpydoc/tests/test_docscrape.py::test_no_summary",
"numpydoc/tests/test_docscrape.py::test_unicode",
"numpydoc/tests/test_docscrape.py::test_plot_examples",
"numpydoc/tests/test_docscrape.py::test_class_members",
"numpydoc/tests/test_docscrape.py::test_duplicate_signature",
"numpydoc/tests/test_docscrape.py::test_class_members_doc",
"numpydoc/tests/test_docscrape.py::test_class_members_doc_sphinx",
"numpydoc/tests/test_docscrape.py::test_templated_sections",
"numpydoc/tests/test_docscrape.py::test_nonstandard_property",
"numpydoc/tests/test_docscrape.py::test_args_and_kwargs",
"numpydoc/tests/test_docscrape.py::test_autoclass"
] | [] | BSD License | 2,472 | 1,354 | [
"numpydoc/docscrape.py",
"numpydoc/docscrape_sphinx.py"
] |
jupyter__nbgrader-954 | bbc694e8ee4c1aa4eeaee0936491ff19b20bad60 | 2018-05-03 21:33:50 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/utils.py b/nbgrader/utils.py
index 55824f3f..55f440ab 100644
--- a/nbgrader/utils.py
+++ b/nbgrader/utils.py
@@ -194,8 +194,10 @@ def find_all_files(path, exclude=None):
"""Recursively finds all filenames rooted at `path`, optionally excluding
some based on filename globs."""
files = []
+ to_skip = []
for dirname, dirnames, filenames in os.walk(path):
- if is_ignored(dirname, exclude):
+ if is_ignored(dirname, exclude) or dirname in to_skip:
+ to_skip.extend([os.path.join(dirname, x) for x in dirnames])
continue
for filename in filenames:
fullpath = os.path.join(dirname, filename)
| Unexpected behaviour of utils.find_all_files
<!--
Thanks for helping to improve nbgrader!
If you are submitting a bug report or looking for support, please use the below
template so we can efficiently solve the problem.
If you are requesting a new feature, feel free to remove irrelevant pieces of
the issue template.
-->
### Operating system
Ubunto 16.04
### `nbgrader --version`
nbgrader version 0.5.4
### `jupyterhub --version` (if used with JupyterHub)
0.8.1
### `jupyter notebook --version`
5.4.1
### Expected behavior
By including '.git' or '.git/**' in CourseDirectory.ignore anything under the git directory to be ignored.
### Actual behavior
Anything in subdirectories of '.git' is included.
### Steps to reproduce the behavior
$ mkdir -p foo/bar/qwe
$ touch foo/bar/qwe/file.py
$ /opt/conda/bin/python -c "from nbgrader.utils import find_all_files;print(find_all_files('foo', ['bar']))"
['foo/bar/qwe/file.py']
I'm sorry if this is expected behaviour but I found it surprising. | jupyter/nbgrader | diff --git a/nbgrader/tests/utils/test_utils.py b/nbgrader/tests/utils/test_utils.py
index 2814ea5c..ca76e83f 100644
--- a/nbgrader/tests/utils/test_utils.py
+++ b/nbgrader/tests/utils/test_utils.py
@@ -272,18 +272,34 @@ def test_is_ignored(temp_cwd):
def test_find_all_files(temp_cwd):
- os.makedirs(join("foo", "bar"))
+ os.makedirs(join("foo", "bar", "quux"))
with open(join("foo", "baz.txt"), "w") as fh:
fh.write("baz")
with open(join("foo", "bar", "baz.txt"), "w") as fh:
fh.write("baz")
+ with open(join("foo", "bar", "quux", "baz.txt"), "w") as fh:
+ fh.write("baz")
- assert utils.find_all_files("foo") == [join("foo", "baz.txt"), join("foo", "bar", "baz.txt")]
+ assert utils.find_all_files("foo") == [
+ join("foo", "baz.txt"),
+ join("foo", "bar", "baz.txt"),
+ join("foo", "bar", "quux", "baz.txt")]
assert utils.find_all_files("foo", ["bar"]) == [join("foo", "baz.txt")]
- assert utils.find_all_files(join("foo", "bar")) == [join("foo", "bar", "baz.txt")]
+ assert utils.find_all_files("foo", ["quux"]) == [
+ join("foo", "baz.txt"),
+ join("foo", "bar", "baz.txt")]
+ assert utils.find_all_files(join("foo", "bar")) == [
+ join("foo", "bar", "baz.txt"),
+ join("foo", "bar", "quux", "baz.txt")]
assert utils.find_all_files(join("foo", "bar"), ["*.txt"]) == []
- assert utils.find_all_files(".") == [join(".", "foo", "baz.txt"), join(".", "foo", "bar", "baz.txt")]
+ assert utils.find_all_files(".") == [
+ join(".", "foo", "baz.txt"),
+ join(".", "foo", "bar", "baz.txt"),
+ join(".", "foo", "bar", "quux", "baz.txt")]
assert utils.find_all_files(".", ["bar"]) == [join(".", "foo", "baz.txt")]
+ assert utils.find_all_files(".", ["quux"]) == [
+ join(".", "foo", "baz.txt"),
+ join(".", "foo", "bar", "baz.txt")]
def test_unzip_invalid_ext(temp_cwd):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pyenchant",
"sphinxcontrib-spelling",
"sphinx_rtd_theme",
"nbval",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.5",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
comm==0.1.4
contextvars==2.4
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@bbc694e8ee4c1aa4eeaee0936491ff19b20bad60#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- comm==0.1.4
- contextvars==2.4
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- send2trash==1.8.3
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/utils/test_utils.py::test_find_all_files"
] | [] | [
"nbgrader/tests/utils/test_utils.py::test_is_grade",
"nbgrader/tests/utils/test_utils.py::test_is_solution",
"nbgrader/tests/utils/test_utils.py::test_is_locked",
"nbgrader/tests/utils/test_utils.py::test_determine_grade_code_grade",
"nbgrader/tests/utils/test_utils.py::test_determine_grade_markdown_grade",
"nbgrader/tests/utils/test_utils.py::test_determine_grade_solution",
"nbgrader/tests/utils/test_utils.py::test_determine_grade_code_grade_and_solution",
"nbgrader/tests/utils/test_utils.py::test_determine_grade_markdown_grade_and_solution",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_identical",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_cell_type",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_whitespace",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_source",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_points",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_grade_id",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_grade_cell",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_solution_cell",
"nbgrader/tests/utils/test_utils.py::test_compute_checksum_utf8",
"nbgrader/tests/utils/test_utils.py::test_is_ignored",
"nbgrader/tests/utils/test_utils.py::test_unzip_invalid_ext",
"nbgrader/tests/utils/test_utils.py::test_unzip_bad_zip",
"nbgrader/tests/utils/test_utils.py::test_unzip_no_output_path",
"nbgrader/tests/utils/test_utils.py::test_unzip_create_own_folder",
"nbgrader/tests/utils/test_utils.py::test_unzip_tree"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,475 | 197 | [
"nbgrader/utils.py"
] |
|
Azure__WALinuxAgent-1148 | 423dc18485e4c8d506bd07f77f7612b17bda27eb | 2018-05-03 23:30:54 | 6e9b985c1d7d564253a1c344bab01b45093103cd | boumenot: I opened #1161 to address the telemetry issue. There is some sort of circular dependency issue that manifest on CI, but not locally. I will debug it later, and add the necessary event. | diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py
index 841f9b72..265b1f6f 100644
--- a/azurelinuxagent/common/protocol/wire.py
+++ b/azurelinuxagent/common/protocol/wire.py
@@ -600,6 +600,12 @@ class WireClient(object):
random.shuffle(version_uris_shuffled)
for version in version_uris_shuffled:
+ # GA expects a location and failoverLocation in ExtensionsConfig, but
+ # this is not always the case. See #1147.
+ if version.uri is None:
+ logger.verbose('The specified manifest URL is empty, ignored.')
+ continue
+
response = None
if not HostPluginProtocol.is_default_channel():
response = self.fetch(version.uri)
diff --git a/azurelinuxagent/common/utils/restutil.py b/azurelinuxagent/common/utils/restutil.py
index 5ceb4c94..fc9aac93 100644
--- a/azurelinuxagent/common/utils/restutil.py
+++ b/azurelinuxagent/common/utils/restutil.py
@@ -170,8 +170,6 @@ def _http_request(method, host, rel_uri, port=None, data=None, secure=False,
headers=None, proxy_host=None, proxy_port=None):
headers = {} if headers is None else headers
- headers['Connection'] = 'close'
-
use_proxy = proxy_host is not None and proxy_port is not None
if port is None:
| ExtensionsConfig May Not Contain a failoverLocation Attribute
The agent expects ExtensionsConfig to have a location and failoverLocation for each plugin. This has been proven to not be true for all regions. I consider this to be a bug upstream, but the agent should be robust enough to handle this case.
| Azure/WALinuxAgent | diff --git a/tests/utils/test_rest_util.py b/tests/utils/test_rest_util.py
index adeb8141..a864884a 100644
--- a/tests/utils/test_rest_util.py
+++ b/tests/utils/test_rest_util.py
@@ -195,7 +195,7 @@ class TestHttpOperations(AgentTestCase):
])
HTTPSConnection.assert_not_called()
mock_conn.request.assert_has_calls([
- call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
+ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
@@ -218,7 +218,7 @@ class TestHttpOperations(AgentTestCase):
call("foo", 443, timeout=10)
])
mock_conn.request.assert_has_calls([
- call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
+ call(method="GET", url="/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
@@ -242,7 +242,7 @@ class TestHttpOperations(AgentTestCase):
])
HTTPSConnection.assert_not_called()
mock_conn.request.assert_has_calls([
- call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
+ call(method="GET", url="http://foo:80/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
@@ -267,7 +267,7 @@ class TestHttpOperations(AgentTestCase):
call("foo.bar", 23333, timeout=10)
])
mock_conn.request.assert_has_calls([
- call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT, 'Connection': 'close'})
+ call(method="GET", url="https://foo:443/bar", body=None, headers={'User-Agent': HTTP_USER_AGENT})
])
self.assertEqual(1, mock_conn.getresponse.call_count)
self.assertNotEquals(None, resp)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 2
} | 2.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio",
"distro"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
distro==1.9.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
-e git+https://github.com/Azure/WALinuxAgent.git@423dc18485e4c8d506bd07f77f7612b17bda27eb#egg=WALinuxAgent
| name: WALinuxAgent
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- distro==1.9.0
- execnet==2.1.1
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- typing-extensions==4.13.0
prefix: /opt/conda/envs/WALinuxAgent
| [
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_direct",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_direct_secure",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_proxy",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_proxy_secure"
] | [] | [
"tests/utils/test_rest_util.py::TestIOErrorCounter::test_get_and_reset",
"tests/utils/test_rest_util.py::TestIOErrorCounter::test_increment_hostplugin",
"tests/utils/test_rest_util.py::TestIOErrorCounter::test_increment_other",
"tests/utils/test_rest_util.py::TestIOErrorCounter::test_increment_protocol",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_configuration_overrides_env",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_configuration_requires_host",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_http_uses_httpproxy",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_https_uses_httpsproxy",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_ignores_user_in_httpproxy",
"tests/utils/test_rest_util.py::TestHttpOperations::test_get_http_proxy_none_is_default",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_raises_for_bad_request",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_raises_for_resource_gone",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_exceptions",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_for_safe_minimum_number_when_throttled",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_ioerrors",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_passed_status_codes",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_status_codes",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_with_constant_delay_when_throttled",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_retries_with_fibonacci_delay",
"tests/utils/test_rest_util.py::TestHttpOperations::test_http_request_with_retry",
"tests/utils/test_rest_util.py::TestHttpOperations::test_parse_url",
"tests/utils/test_rest_util.py::TestHttpOperations::test_read_response_bytes",
"tests/utils/test_rest_util.py::TestHttpOperations::test_read_response_error",
"tests/utils/test_rest_util.py::TestHttpOperations::test_request_failed",
"tests/utils/test_rest_util.py::TestHttpOperations::test_request_succeeded"
] | [] | Apache License 2.0 | 2,476 | 364 | [
"azurelinuxagent/common/protocol/wire.py",
"azurelinuxagent/common/utils/restutil.py"
] |
jupyter__nbgrader-959 | 1c823a4410ef3abcdd1a9f50aab5a546c994e4e8 | 2018-05-08 21:39:10 | 5bc6f37c39c8b10b8f60440b2e6d9487e63ef3f1 | diff --git a/nbgrader/apps/assignapp.py b/nbgrader/apps/assignapp.py
index 85fcf0e8..ebac9e3b 100644
--- a/nbgrader/apps/assignapp.py
+++ b/nbgrader/apps/assignapp.py
@@ -40,6 +40,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
diff --git a/nbgrader/apps/autogradeapp.py b/nbgrader/apps/autogradeapp.py
index 64ef3320..187df53b 100644
--- a/nbgrader/apps/autogradeapp.py
+++ b/nbgrader/apps/autogradeapp.py
@@ -30,6 +30,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
diff --git a/nbgrader/apps/dbapp.py b/nbgrader/apps/dbapp.py
index fa0e2c50..0ac5e83c 100644
--- a/nbgrader/apps/dbapp.py
+++ b/nbgrader/apps/dbapp.py
@@ -78,6 +78,10 @@ student_remove_flags.update({
{'DbStudentRemoveApp': {'force': True}},
"Complete the operation, even if it means grades will be deleted."
),
+ 'f': (
+ {'DbStudentRemoveApp': {'force': True}},
+ "Complete the operation, even if it means grades will be deleted."
+ ),
})
class DbStudentRemoveApp(NbGrader):
@@ -314,6 +318,10 @@ assignment_remove_flags.update({
{'DbAssignmentRemoveApp': {'force': True}},
"Complete the operation, even if it means grades will be deleted."
),
+ 'f': (
+ {'DbAssignmentRemoveApp': {'force': True}},
+ "Complete the operation, even if it means grades will be deleted."
+ ),
})
class DbAssignmentRemoveApp(NbGrader):
diff --git a/nbgrader/apps/feedbackapp.py b/nbgrader/apps/feedbackapp.py
index f4bde288..b25a9578 100644
--- a/nbgrader/apps/feedbackapp.py
+++ b/nbgrader/apps/feedbackapp.py
@@ -19,6 +19,10 @@ flags.update({
{'BaseConverter': {'force': True}},
"Overwrite an assignment/submission if it already exists."
),
+ 'f': (
+ {'BaseConverter': {'force': True}},
+ "Overwrite an assignment/submission if it already exists."
+ ),
})
class FeedbackApp(NbGrader):
diff --git a/nbgrader/apps/quickstartapp.py b/nbgrader/apps/quickstartapp.py
index 77154df3..462e1cd7 100644
--- a/nbgrader/apps/quickstartapp.py
+++ b/nbgrader/apps/quickstartapp.py
@@ -26,6 +26,20 @@ flags = {
"""
)
),
+ 'f': (
+ {'QuickStartApp': {'force': True}},
+ dedent(
+ """
+ Overwrite existing files if they already exist. WARNING: this is
+ equivalent to doing:
+
+ rm -r <course_id>
+ nbgrader quickstart <course_id>
+
+ So be careful when using this flag!
+ """
+ )
+ ),
}
class QuickStartApp(NbGrader):
diff --git a/nbgrader/apps/releaseapp.py b/nbgrader/apps/releaseapp.py
index 0968ef4b..c44270cd 100644
--- a/nbgrader/apps/releaseapp.py
+++ b/nbgrader/apps/releaseapp.py
@@ -20,6 +20,10 @@ flags.update({
{'ExchangeRelease' : {'force' : True}},
"Force overwrite of existing files in the exchange."
),
+ 'f': (
+ {'ExchangeRelease' : {'force' : True}},
+ "Force overwrite of existing files in the exchange."
+ ),
})
class ReleaseApp(NbGrader):
diff --git a/nbgrader/apps/zipcollectapp.py b/nbgrader/apps/zipcollectapp.py
index 1183667f..2cac325e 100644
--- a/nbgrader/apps/zipcollectapp.py
+++ b/nbgrader/apps/zipcollectapp.py
@@ -35,6 +35,13 @@ flags = {
},
"Force overwrite of existing files."
),
+ 'f': (
+ {
+ 'ZipCollectApp': {'force': True},
+ 'ExtractorPlugin': {'force': True}
+ },
+ "Force overwrite of existing files."
+ ),
'strict': (
{'ZipCollectApp': {'strict': True}},
"Skip submitted notebooks with invalid names."
| Allow nbgrader apps to use -f and --force
Currently only --force is supported, which means you have to do:
```
nbgrader autograde ps1 --force
```
rather than
```
nbgrader autograde ps1 -f
```
Both should be supported flags.
| jupyter/nbgrader | diff --git a/nbgrader/tests/apps/test_nbgrader_assign.py b/nbgrader/tests/apps/test_nbgrader_assign.py
index c39d91db..0af3eb6a 100644
--- a/nbgrader/tests/apps/test_nbgrader_assign.py
+++ b/nbgrader/tests/apps/test_nbgrader_assign.py
@@ -126,6 +126,38 @@ class TestNbGraderAssign(BaseTestApp):
assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
+ def test_force_f(self, course_dir):
+ """Ensure the force option works properly"""
+ self._copy_file(join('files', 'test.ipynb'), join(course_dir, 'source', 'ps1', 'test.ipynb'))
+ self._make_file(join(course_dir, 'source', 'ps1', 'foo.txt'), "foo")
+ self._make_file(join(course_dir, 'source', 'ps1', 'data', 'bar.txt'), "bar")
+ self._make_file(join(course_dir, 'source', 'ps1', 'blah.pyc'), "asdf")
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name="ps1")]\n""")
+
+ run_nbgrader(["assign", "ps1"])
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'test.ipynb'))
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'data', 'bar.txt'))
+ assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'blah.pyc'))
+
+ # check that it skips the existing directory
+ os.remove(join(course_dir, 'release', 'ps1', 'foo.txt'))
+ run_nbgrader(["assign", "ps1"])
+ assert not os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["assign", "ps1", "-f"])
+ assert os.path.isfile(join(course_dir, 'release', 'ps1', 'foo.txt'))
+
+ # force overwrite
+ os.remove(join(course_dir, 'source', 'ps1', 'foo.txt'))
+ run_nbgrader(["assign", "ps1", "-f"])
+ assert os.path.isfile(join(course_dir, "release", "ps1", "test.ipynb"))
+ assert os.path.isfile(join(course_dir, "release", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "release", "ps1", "foo.txt"))
+ assert not os.path.isfile(join(course_dir, "release", "ps1", "blah.pyc"))
+
def test_permissions(self, course_dir):
"""Are permissions properly set?"""
self._empty_notebook(join(course_dir, 'source', 'ps1', 'foo.ipynb'))
diff --git a/nbgrader/tests/apps/test_nbgrader_autograde.py b/nbgrader/tests/apps/test_nbgrader_autograde.py
index ba44d44b..02cfbcbd 100644
--- a/nbgrader/tests/apps/test_nbgrader_autograde.py
+++ b/nbgrader/tests/apps/test_nbgrader_autograde.py
@@ -335,6 +335,46 @@ class TestNbGraderAutograde(BaseTestApp):
assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+ def test_force_f(self, db, course_dir):
+ """Ensure the force option works properly"""
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name='ps1', duedate='2015-02-02 14:58:23.948203 PST')]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo"), dict(id="bar")]""")
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["assign", "ps1", "--db", db])
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "blah.pyc"), "asdf")
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+
+ # check that it skips the existing directory
+ remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+
+ # force overwrite
+ remove(join(course_dir, "source", "ps1", "foo.txt"))
+ remove(join(course_dir, "submitted", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["autograde", "ps1", "--db", db, "-f"])
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "p1.ipynb"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ assert os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "data", "bar.txt"))
+ assert not os.path.isfile(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"))
+
def test_filter_notebook(self, db, course_dir):
"""Does autograding filter by notebook properly?"""
with open("nbgrader_config.py", "a") as fh:
diff --git a/nbgrader/tests/apps/test_nbgrader_db.py b/nbgrader/tests/apps/test_nbgrader_db.py
index 5b7789da..9576ecae 100644
--- a/nbgrader/tests/apps/test_nbgrader_db.py
+++ b/nbgrader/tests/apps/test_nbgrader_db.py
@@ -105,7 +105,33 @@ class TestNbGraderDb(BaseTestApp):
# now force it to complete
run_nbgrader(["db", "student", "remove", "foo", "--force", "--db", db])
- # student should be gone
+ # student should be gone
+ with Gradebook(db) as gb:
+ with pytest.raises(MissingEntry):
+ gb.find_student("foo")
+
+ def test_student_remove_with_submissions_f(self, db, course_dir):
+ run_nbgrader(["db", "student", "add", "foo", "--db", db])
+ run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ run_nbgrader(["assign", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ with Gradebook(db) as gb:
+ gb.find_student("foo")
+
+ # it should fail if we don't run with --force
+ run_nbgrader(["db", "student", "remove", "foo", "--db", db], retcode=1)
+
+ # make sure we can still find the student
+ with Gradebook(db) as gb:
+ gb.find_student("foo")
+
+ # now force it to complete
+ run_nbgrader(["db", "student", "remove", "foo", "-f", "--db", db])
+
+ # student should be gone
with Gradebook(db) as gb:
with pytest.raises(MissingEntry):
gb.find_student("foo")
@@ -249,6 +275,32 @@ class TestNbGraderDb(BaseTestApp):
with pytest.raises(MissingEntry):
gb.find_assignment("ps1")
+ def test_assignment_remove_with_submissions_f(self, db, course_dir):
+ run_nbgrader(["db", "student", "add", "foo", "--db", db])
+ run_nbgrader(["db", "assignment", "add", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ run_nbgrader(["assign", "ps1", "--db", db])
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ with Gradebook(db) as gb:
+ gb.find_assignment("ps1")
+
+ # it should fail if we don't run with --force
+ run_nbgrader(["db", "assignment", "remove", "ps1", "--db", db], retcode=1)
+
+ # make sure we can still find the assignment
+ with Gradebook(db) as gb:
+ gb.find_assignment("ps1")
+
+ # now force it to complete
+ run_nbgrader(["db", "assignment", "remove", "ps1", "-f", "--db", db])
+
+ # assignment should be gone
+ with Gradebook(db) as gb:
+ with pytest.raises(MissingEntry):
+ gb.find_assignment("ps1")
+
def test_assignment_list(self, db):
run_nbgrader(["db", "assignment", "add", "foo", '--duedate="Sun Jan 8 2017 4:31:22 PM"', "--db", db])
run_nbgrader(["db", "assignment", "add", "bar", "--db", db])
diff --git a/nbgrader/tests/apps/test_nbgrader_feedback.py b/nbgrader/tests/apps/test_nbgrader_feedback.py
index 637f11d7..20fb7a75 100644
--- a/nbgrader/tests/apps/test_nbgrader_feedback.py
+++ b/nbgrader/tests/apps/test_nbgrader_feedback.py
@@ -67,6 +67,46 @@ class TestNbGraderFeedback(BaseTestApp):
assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+ def test_force_f(self, db, course_dir):
+ """Ensure the force option works properly"""
+ with open("nbgrader_config.py", "a") as fh:
+ fh.write("""c.CourseDirectory.db_assignments = [dict(name="ps1")]\n""")
+ fh.write("""c.CourseDirectory.db_students = [dict(id="foo")]\n""")
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "source", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "source", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "source", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["assign", "ps1", "--db", db])
+
+ self._copy_file(join("files", "submitted-unchanged.ipynb"), join(course_dir, "submitted", "foo", "ps1", "p1.ipynb"))
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "foo.txt"), "foo")
+ self._make_file(join(course_dir, "submitted", "foo", "ps1", "data", "bar.txt"), "bar")
+ run_nbgrader(["autograde", "ps1", "--db", db])
+
+ self._make_file(join(course_dir, "autograded", "foo", "ps1", "blah.pyc"), "asdf")
+ run_nbgrader(["feedback", "ps1", "--db", db])
+
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+
+ # check that it skips the existing directory
+ remove(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["feedback", "ps1", "--db", db])
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+
+ # force overwrite the supplemental files
+ run_nbgrader(["feedback", "ps1", "--db", db, "-f"])
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+
+ # force overwrite
+ remove(join(course_dir, "autograded", "foo", "ps1", "foo.txt"))
+ run_nbgrader(["feedback", "ps1", "--db", db, "--force"])
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "p1.html"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "foo.txt"))
+ assert isfile(join(course_dir, "feedback", "foo", "ps1", "data", "bar.txt"))
+ assert not isfile(join(course_dir, "feedback", "foo", "ps1", "blah.pyc"))
+
def test_filter_notebook(self, db, course_dir):
"""Does feedback filter by notebook properly?"""
with open("nbgrader_config.py", "a") as fh:
diff --git a/nbgrader/tests/apps/test_nbgrader_quickstart.py b/nbgrader/tests/apps/test_nbgrader_quickstart.py
index 1189933c..d9a9f705 100644
--- a/nbgrader/tests/apps/test_nbgrader_quickstart.py
+++ b/nbgrader/tests/apps/test_nbgrader_quickstart.py
@@ -39,3 +39,28 @@ class TestNbGraderQuickStart(BaseTestApp):
# nbgrader assign should work
run_nbgrader(["assign", "ps1"])
+ def test_quickstart_f(self):
+ """Is the quickstart example properly generated?"""
+
+ run_nbgrader(["quickstart", "example"])
+
+ # it should fail if it already exists
+ run_nbgrader(["quickstart", "example"], retcode=1)
+
+ # it should succeed if --force is given
+ os.remove(os.path.join("example", "nbgrader_config.py"))
+ run_nbgrader(["quickstart", "example", "-f"])
+ assert os.path.exists(os.path.join("example", "nbgrader_config.py"))
+
+ # nbgrader validate should work
+ os.chdir("example")
+ for nb in os.listdir(os.path.join("source", "ps1")):
+ if not nb.endswith(".ipynb"):
+ continue
+ output = run_nbgrader(["validate", os.path.join("source", "ps1", nb)], stdout=True)
+ assert output.strip() == "Success! Your notebook passes all the tests."
+
+ # nbgrader assign should work
+ run_nbgrader(["assign", "ps1"])
+
+
diff --git a/nbgrader/tests/apps/test_nbgrader_release.py b/nbgrader/tests/apps/test_nbgrader_release.py
index 0d8bf2dc..830f5955 100644
--- a/nbgrader/tests/apps/test_nbgrader_release.py
+++ b/nbgrader/tests/apps/test_nbgrader_release.py
@@ -53,6 +53,19 @@ class TestNbGraderRelease(BaseTestApp):
self._release("ps1", exchange, flags=["--force"])
assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+ def test_force_release_f(self, exchange, course_dir):
+ self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
+ self._release("ps1", exchange)
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
+ self._release("ps1", exchange, retcode=1)
+
+ os.remove(join(exchange, join("abc101", "outbound", "ps1", "p1.ipynb")))
+ self._release("ps1", exchange, retcode=1)
+
+ self._release("ps1", exchange, flags=["-f"])
+ assert os.path.isfile(join(exchange, "abc101", "outbound", "ps1", "p1.ipynb"))
+
def test_release_with_assignment_flag(self, exchange, course_dir):
self._copy_file(join("files", "test.ipynb"), join(course_dir, "release", "ps1", "p1.ipynb"))
self._release("--assignment=ps1", exchange)
diff --git a/nbgrader/tests/apps/test_nbgrader_zip_collect.py b/nbgrader/tests/apps/test_nbgrader_zip_collect.py
index 58343a55..9f5dca14 100644
--- a/nbgrader/tests/apps/test_nbgrader_zip_collect.py
+++ b/nbgrader/tests/apps/test_nbgrader_zip_collect.py
@@ -72,6 +72,25 @@ class TestNbGraderZipCollect(BaseTestApp):
assert os.path.isdir(extracted_dir)
assert len(os.listdir(extracted_dir)) == 1
+ def test_extract_single_notebook_f(self, course_dir, archive_dir):
+ extracted_dir = join(archive_dir, "..", "extracted")
+ self._make_notebook(archive_dir,
+ 'ps1', 'hacker', '2016-01-30-15-30-10', 'problem1')
+
+ run_nbgrader(["zip_collect", "ps1"])
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
+ # Run again should fail
+ run_nbgrader(["zip_collect", "ps1"], retcode=1)
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
+ # Run again with --force flag should pass
+ run_nbgrader(["zip_collect", "-f", "ps1"])
+ assert os.path.isdir(extracted_dir)
+ assert len(os.listdir(extracted_dir)) == 1
+
def test_extract_sub_dir_single_notebook(self, course_dir, archive_dir):
extracted_dir = join(archive_dir, "..", "extracted")
self._make_notebook(join(archive_dir, 'hacker'),
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 7
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -r dev-requirements.txt -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-rerunfailures",
"coverage",
"selenium",
"invoke",
"sphinx",
"codecov",
"cov-core",
"nbval"
],
"pre_install": [
"pip install -U pip wheel setuptools"
],
"python": "3.5",
"reqs_path": [
"dev-requirements.txt",
"dev-requirements-windows.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
alembic==1.7.7
anyio==3.6.2
argon2-cffi==21.3.0
argon2-cffi-bindings==21.2.0
async-generator==1.10
attrs==22.2.0
Babel==2.11.0
backcall==0.2.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
comm==0.1.4
contextvars==2.4
cov-core==1.15.0
coverage==6.2
dataclasses==0.8
decorator==5.1.1
defusedxml==0.7.1
docutils==0.18.1
entrypoints==0.4
greenlet==2.0.2
idna==3.10
imagesize==1.4.1
immutables==0.19
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
invoke==2.2.0
ipykernel==5.5.6
ipython==7.16.3
ipython-genutils==0.2.0
ipywidgets==7.8.5
jedi==0.17.2
Jinja2==3.0.3
json5==0.9.16
jsonschema==3.2.0
jupyter==1.1.1
jupyter-client==7.1.2
jupyter-console==6.4.3
jupyter-core==4.9.2
jupyter-server==1.13.1
jupyterlab==3.2.9
jupyterlab-pygments==0.1.2
jupyterlab-server==2.10.3
jupyterlab_widgets==1.1.11
Mako==1.1.6
MarkupSafe==2.0.1
mistune==0.8.4
nbclassic==0.3.5
nbclient==0.5.9
nbconvert==6.0.7
nbformat==5.1.3
-e git+https://github.com/jupyter/nbgrader.git@1c823a4410ef3abcdd1a9f50aab5a546c994e4e8#egg=nbgrader
nbval==0.10.0
nest-asyncio==1.6.0
notebook==6.4.10
packaging==21.3
pandocfilters==1.5.1
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
pluggy==1.0.0
prometheus-client==0.17.1
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyenchant==3.2.2
Pygments==2.14.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
pytest-cov==4.0.0
pytest-rerunfailures==10.3
python-dateutil==2.9.0.post0
pytz==2025.2
pyzmq==25.1.2
requests==2.27.1
selenium==3.141.0
Send2Trash==1.8.3
six==1.17.0
sniffio==1.2.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
sphinxcontrib-spelling==7.7.0
SQLAlchemy==1.4.54
terminado==0.12.1
testpath==0.6.0
tomli==1.2.3
tornado==6.1
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
webencodings==0.5.1
websocket-client==1.3.1
widgetsnbextension==3.6.10
zipp==3.6.0
| name: nbgrader
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- alembic==1.7.7
- anyio==3.6.2
- argon2-cffi==21.3.0
- argon2-cffi-bindings==21.2.0
- async-generator==1.10
- attrs==22.2.0
- babel==2.11.0
- backcall==0.2.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- comm==0.1.4
- contextvars==2.4
- cov-core==1.15.0
- coverage==6.2
- dataclasses==0.8
- decorator==5.1.1
- defusedxml==0.7.1
- docutils==0.18.1
- entrypoints==0.4
- greenlet==2.0.2
- idna==3.10
- imagesize==1.4.1
- immutables==0.19
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- invoke==2.2.0
- ipykernel==5.5.6
- ipython==7.16.3
- ipython-genutils==0.2.0
- ipywidgets==7.8.5
- jedi==0.17.2
- jinja2==3.0.3
- json5==0.9.16
- jsonschema==3.2.0
- jupyter==1.1.1
- jupyter-client==7.1.2
- jupyter-console==6.4.3
- jupyter-core==4.9.2
- jupyter-server==1.13.1
- jupyterlab==3.2.9
- jupyterlab-pygments==0.1.2
- jupyterlab-server==2.10.3
- jupyterlab-widgets==1.1.11
- mako==1.1.6
- markupsafe==2.0.1
- mistune==0.8.4
- nbclassic==0.3.5
- nbclient==0.5.9
- nbconvert==6.0.7
- nbformat==5.1.3
- nbval==0.10.0
- nest-asyncio==1.6.0
- notebook==6.4.10
- packaging==21.3
- pandocfilters==1.5.1
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pip==21.3.1
- pluggy==1.0.0
- prometheus-client==0.17.1
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyenchant==3.2.2
- pygments==2.14.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-rerunfailures==10.3
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyzmq==25.1.2
- requests==2.27.1
- selenium==3.141.0
- send2trash==1.8.3
- setuptools==59.6.0
- six==1.17.0
- sniffio==1.2.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- sphinxcontrib-spelling==7.7.0
- sqlalchemy==1.4.54
- terminado==0.12.1
- testpath==0.6.0
- tomli==1.2.3
- tornado==6.1
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- webencodings==0.5.1
- websocket-client==1.3.1
- widgetsnbextension==3.6.10
- zipp==3.6.0
prefix: /opt/conda/envs/nbgrader
| [
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_force_f",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force_f",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove_with_submissions_f",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove_with_submissions_f",
"nbgrader/tests/apps/test_nbgrader_quickstart.py::TestNbGraderQuickStart::test_quickstart_f",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_force_release_f",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_extract_single_notebook_f"
] | [
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer_single_notebook",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_single_file",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_force",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_force_f",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_filter_notebook",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_permissions",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_update_newer",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_update_newer_single_notebook"
] | [
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_help",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_conflicting_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_multiple_args",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_assignment",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_single_file",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_single_file_bad_assignment_name",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_multiple_files",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_dependent_files",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_save_cells",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_force",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_permissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_add_remove_extra_notebooks",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_add_extra_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_remove_extra_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_same_notebooks_with_submissions",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_force_single_notebook",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_fail_no_notebooks",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_no_metadata",
"nbgrader/tests/apps/test_nbgrader_assign.py::TestNbGraderAssign::test_header",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_help",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_student",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_assignment",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_empty_timestamp",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_none",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_zero",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_late_submission_penalty_plugin",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_force",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_filter_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_overwrite_files",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_overwrite_files_subdirs",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_side_effects",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_skip_extra_notebooks",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_custom_permissions",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_update_newer",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_hidden_tests_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_handle_failure_single_notebook",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_source_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_incorrect_submitted_kernelspec",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_no_execute",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_infinite_loop",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_missing_files",
"nbgrader/tests/apps/test_nbgrader_autograde.py::TestNbGraderAutograde::test_grade_missing_notebook",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_help",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_no_args",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_add",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_remove_with_submissions",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_list",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_student_import_csv_spaces",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_add",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_remove_with_submissions",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_list",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_assignment_import_csv_spaces",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_nodb",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_current_db",
"nbgrader/tests/apps/test_nbgrader_db.py::TestNbGraderDb::test_upgrade_old_db",
"nbgrader/tests/apps/test_nbgrader_feedback.py::TestNbGraderFeedback::test_help",
"nbgrader/tests/apps/test_nbgrader_quickstart.py::TestNbGraderQuickStart::test_help",
"nbgrader/tests/apps/test_nbgrader_quickstart.py::TestNbGraderQuickStart::test_no_course_id",
"nbgrader/tests/apps/test_nbgrader_quickstart.py::TestNbGraderQuickStart::test_quickstart",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_help",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_course_id",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_force_release",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_release_with_assignment_flag",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_no_exchange",
"nbgrader/tests/apps/test_nbgrader_release.py::TestNbGraderRelease::test_exchange_bad_perms",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_help",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_args",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_no_archive_dir",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_empty_folders",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_extract_single_notebook",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_extract_sub_dir_single_notebook",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_extract_archive",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_extract_archive_copies",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_no_regexp",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_bad_regexp",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_regexp_missing_student_id",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_regexp_bad_student_id_type",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_single_notebook",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_single_notebook_attempts",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_multiple_notebooks",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_sub_dir_single_notebook",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_invalid_notebook",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_none",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_empty_str",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_bad_str",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_skip_older",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_replace_newer",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_timestamp_file",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_preserve_sub_dir",
"nbgrader/tests/apps/test_nbgrader_zip_collect.py::TestNbGraderZipCollect::test_collect_duplicate_fail"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,489 | 1,259 | [
"nbgrader/apps/assignapp.py",
"nbgrader/apps/autogradeapp.py",
"nbgrader/apps/dbapp.py",
"nbgrader/apps/feedbackapp.py",
"nbgrader/apps/quickstartapp.py",
"nbgrader/apps/releaseapp.py",
"nbgrader/apps/zipcollectapp.py"
] |
|
HECBioSim__Longbow-93 | 145a985cb0b3eb18fc3dd1f0dc74a9ee4e9c236c | 2018-05-10 10:37:37 | c81fcaccfa7fb2dc147e40970ef806dc6d6b22a4 | diff --git a/longbow/applications.py b/longbow/applications.py
index c693b42..8f99c1e 100755
--- a/longbow/applications.py
+++ b/longbow/applications.py
@@ -353,13 +353,15 @@ def _procfiles(job, arg, filelist, foundflags, substitution):
# Otherwise we have a replicate job so check these.
else:
- # Add the repX dir
- if ("rep" + str(rep)) not in filelist:
+ repx = str(job["replicate-naming"]) + str(rep)
- filelist.append("rep" + str(rep))
+ # Add the repx dir
+ if (repx) not in filelist:
+
+ filelist.append(repx)
fileitem = _procfilesreplicatejobs(
- app, arg, job["localworkdir"], initargs, rep)
+ app, arg, job["localworkdir"], initargs, repx)
job["executableargs"] = initargs
@@ -407,21 +409,21 @@ def _procfilessinglejob(app, arg, cwd):
return fileitem
-def _procfilesreplicatejobs(app, arg, cwd, initargs, rep):
+def _procfilesreplicatejobs(app, arg, cwd, initargs, repx):
"""Processor for replicate jobs."""
fileitem = ""
tmpitem = ""
# We should check that the replicate directory structure exists.
- if os.path.isdir(os.path.join(cwd, "rep" + str(rep))) is False:
+ if os.path.isdir(os.path.join(cwd, repx)) is False:
- os.mkdir(os.path.join(cwd, "rep" + str(rep)))
+ os.mkdir(os.path.join(cwd, repx))
# If we have a replicate job then we should check if the file resides
# within ./rep{i} or if it is a global (common to each replicate) file.
- if os.path.isfile(os.path.join(cwd, "rep" + str(rep), arg)):
+ if os.path.isfile(os.path.join(cwd, repx, arg)):
- fileitem = os.path.join("rep" + str(rep), arg)
+ fileitem = os.path.join(repx, arg)
# Otherwise do we have a file in cwd
elif os.path.isfile(os.path.join(cwd, arg)):
@@ -440,7 +442,7 @@ def _procfilesreplicatejobs(app, arg, cwd, initargs, rep):
try:
tmpitem, _ = getattr(apps, app.lower()).defaultfilename(
- cwd, os.path.join("rep" + str(rep), arg), "")
+ cwd, os.path.join(repx, arg), "")
except AttributeError:
diff --git a/longbow/configuration.py b/longbow/configuration.py
index 2e82db1..b35c5c8 100755
--- a/longbow/configuration.py
+++ b/longbow/configuration.py
@@ -103,6 +103,7 @@ JOBTEMPLATE = {
"remoteworkdir": "",
"resource": "",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"scripts": "",
"slurm-gres": "",
| Allow replicate directory naming schemes
At the moment users have to have a specific fixed structure for replicates where the directory consists of rep[x] where the rep part is fixed and the number is incremented. Users have requested that the rep part is flexible so Longbow can be better chained with other tools. | HECBioSim/Longbow | diff --git a/tests/unit/applications/test_procfiles.py b/tests/unit/applications/test_procfiles.py
index 01542ab..a3a27f7 100644
--- a/tests/unit/applications/test_procfiles.py
+++ b/tests/unit/applications/test_procfiles.py
@@ -35,6 +35,7 @@ This testing module contains the tests for the applications module methods.
"""
from longbow.applications import _procfiles
+from longbow.configuration import JOBTEMPLATE
def test_procfiles_amber():
@@ -43,16 +44,16 @@ def test_procfiles_amber():
Test to make sure that the file and flag is picked up for an amber-like
command-line.
"""
+ job = JOBTEMPLATE.copy()
arg = "coords"
filelist = []
foundflags = []
- job = {
- "executable": "pmemd.MPI",
- "replicates": "1",
- "localworkdir": "tests/standards/jobs/single",
- "executableargs": ["-i", "input", "-c", "coords", "-p", "topol"]
- }
+
+ job["executable"] = "pmemd.MPI"
+ job["localworkdir"] = "tests/standards/jobs/single"
+ job["executableargs"] = ["-i", "input", "-c", "coords", "-p", "topol"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -68,15 +69,15 @@ def test_procfiles_charmm():
command-line.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "topol"
filelist = []
foundflags = []
- job = {
- "executable": "charmm",
- "replicates": "1",
- "localworkdir": "tests/standards/jobs/single",
- "executableargs": ["<", "topol"]
- }
+ job["executable"] = "charmm"
+ job["localworkdir"] = "tests/standards/jobs/single"
+ job["executableargs"] = ["<", "topol"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -92,15 +93,15 @@ def test_procfiles_gromacs():
command-line.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "test"
filelist = []
foundflags = []
- job = {
- "executable": "mdrun_mpi",
- "replicates": "1",
- "localworkdir": "tests/standards/jobs/single",
- "executableargs": ["-deffnm", "test"]
- }
+ job["executable"] = "mdrun_mpi"
+ job["localworkdir"] = "tests/standards/jobs/single"
+ job["executableargs"] = ["-deffnm", "test"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -116,15 +117,15 @@ def test_procfiles_namd1():
command-line.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "input"
filelist = []
foundflags = []
- job = {
- "executable": "namd2",
- "replicates": "1",
- "localworkdir": "tests/standards/jobs/single",
- "executableargs": ["input"]
- }
+ job["executable"] = "namd2"
+ job["localworkdir"] = "tests/standards/jobs/single"
+ job["executableargs"] = ["input"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -140,15 +141,15 @@ def test_procfiles_namd2():
command-line.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "input"
filelist = []
foundflags = []
- job = {
- "executable": "namd2",
- "replicates": "1",
- "localworkdir": "tests/standards/jobs/single",
- "executableargs": ["input", ">", "output"]
- }
+ job["executable"] = "namd2"
+ job["localworkdir"] = "tests/standards/jobs/single"
+ job["executableargs"] = ["input", ">", "output"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -163,15 +164,16 @@ def test_procfiles_reps1():
Test for replicate variant.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "coords"
filelist = []
foundflags = []
- job = {
- "executable": "pmemd.MPI",
- "replicates": "3",
- "localworkdir": "tests/standards/jobs/replicate",
- "executableargs": ["-i", "input", "-c", "coords", "-p", "topol"]
- }
+ job["executable"] = "pmemd.MPI"
+ job["replicates"] = "3"
+ job["localworkdir"] = "tests/standards/jobs/replicate"
+ job["executableargs"] = ["-i", "input", "-c", "coords", "-p", "topol"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
@@ -187,15 +189,16 @@ def test_procfiles_reps2():
Test for replicate variant with global.
"""
+ job = JOBTEMPLATE.copy()
+
arg = "topol"
filelist = []
foundflags = []
- job = {
- "executable": "pmemd.MPI",
- "replicates": "3",
- "localworkdir": "tests/standards/jobs/replicate",
- "executableargs": ["-i", "input", "-c", "coords", "-p", "topol"]
- }
+ job["executable"] = "pmemd.MPI"
+ job["replicates"] = "3"
+ job["localworkdir"] = "tests/standards/jobs/replicate"
+ job["executableargs"] = ["-i", "input", "-c", "coords", "-p", "topol"]
+
substitution = {}
foundflags = _procfiles(job, arg, filelist, foundflags, substitution)
diff --git a/tests/unit/applications/test_procfilesreplicatejobs.py b/tests/unit/applications/test_procfilesreplicatejobs.py
index ca2e0a5..03c6253 100644
--- a/tests/unit/applications/test_procfilesreplicatejobs.py
+++ b/tests/unit/applications/test_procfilesreplicatejobs.py
@@ -57,7 +57,7 @@ def test_procfilesreplicatejobs_t1():
arg = "input"
cwd = os.path.join(os.getcwd(), "tests/standards/jobs/replicate")
initargs = ["-i", "input", "-c", "coords", "-p", "topol"]
- rep = 1
+ rep = "rep1"
fileitem = _procfilesreplicatejobs(app, arg, cwd, initargs, rep)
@@ -75,7 +75,7 @@ def test_procfilesreplicatejobs_t2():
arg = "topol"
cwd = os.path.join(os.getcwd(), "tests/standards/jobs/replicate")
initargs = ["-i", "input", "-c", "coords", "-p", "topol"]
- rep = 1
+ rep = "rep1"
fileitem = _procfilesreplicatejobs(app, arg, cwd, initargs, rep)
@@ -93,7 +93,7 @@ def test_procfilesreplicatejobs_t3():
arg = "test"
cwd = os.path.join(os.getcwd(), "tests/standards/jobs/replicate")
initargs = ["-i", "input", "-c", "coords", "-p", "topol"]
- rep = 1
+ rep = "rep1"
fileitem = _procfilesreplicatejobs(app, arg, cwd, initargs, rep)
@@ -110,7 +110,7 @@ def test_procfilesreplicatejobs_t4():
arg = "test"
cwd = os.path.join(os.getcwd(), "tests/standards/jobs/replicate")
initargs = ["-deffnm", "test"]
- rep = 2
+ rep = "rep2"
fileitem = _procfilesreplicatejobs(app, arg, cwd, initargs, rep)
@@ -128,7 +128,7 @@ def test_procfilesreplicatejobs_t5(m_mkdir):
arg = "test"
cwd = os.path.join(os.getcwd(), "tests/standards/jobs/replicate")
initargs = ["-deffnm", "test"]
- rep = 4
+ rep = "rep4"
fileitem = _procfilesreplicatejobs(app, arg, cwd, initargs, rep)
diff --git a/tests/unit/configuration/test_processconfigsresource.py b/tests/unit/configuration/test_processconfigsresource.py
index 04e5618..4a6bc8f 100644
--- a/tests/unit/configuration/test_processconfigsresource.py
+++ b/tests/unit/configuration/test_processconfigsresource.py
@@ -130,6 +130,7 @@ def test_processconfigsresource1():
"remoteworkdir": "",
"resource": "host1",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"user": "",
"upload-exclude": "",
@@ -236,6 +237,7 @@ def test_processconfigsresource2():
"remoteworkdir": "",
"resource": "host2",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"user": "",
"upload-exclude": "",
@@ -343,6 +345,7 @@ def test_processconfigsresource3():
"remoteworkdir": "",
"resource": "host1",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"user": "",
"upload-exclude": "",
@@ -449,6 +452,7 @@ def test_processconfigsresource4():
"remoteworkdir": "",
"resource": "host3",
"replicates": "1",
+ "replicate-naming": "rep",
"scheduler": "",
"user": "",
"upload-exclude": "",
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 2
} | .1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
importlib-metadata==4.8.3
iniconfig==1.1.1
-e git+https://github.com/HECBioSim/Longbow.git@145a985cb0b3eb18fc3dd1f0dc74a9ee4e9c236c#egg=Longbow
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: Longbow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/Longbow
| [
"tests/unit/applications/test_procfilesreplicatejobs.py::test_procfilesreplicatejobs_t1",
"tests/unit/applications/test_procfilesreplicatejobs.py::test_procfilesreplicatejobs_t4"
] | [] | [
"tests/unit/applications/test_procfiles.py::test_procfiles_amber",
"tests/unit/applications/test_procfiles.py::test_procfiles_charmm",
"tests/unit/applications/test_procfiles.py::test_procfiles_gromacs",
"tests/unit/applications/test_procfiles.py::test_procfiles_namd1",
"tests/unit/applications/test_procfiles.py::test_procfiles_namd2",
"tests/unit/applications/test_procfiles.py::test_procfiles_reps1",
"tests/unit/applications/test_procfiles.py::test_procfiles_reps2",
"tests/unit/applications/test_procfilesreplicatejobs.py::test_procfilesreplicatejobs_t2",
"tests/unit/applications/test_procfilesreplicatejobs.py::test_procfilesreplicatejobs_t3",
"tests/unit/applications/test_procfilesreplicatejobs.py::test_procfilesreplicatejobs_t5",
"tests/unit/configuration/test_processconfigsresource.py::test_processconfigsresource1",
"tests/unit/configuration/test_processconfigsresource.py::test_processconfigsresource2",
"tests/unit/configuration/test_processconfigsresource.py::test_processconfigsresource3",
"tests/unit/configuration/test_processconfigsresource.py::test_processconfigsresource4",
"tests/unit/configuration/test_processconfigsresource.py::test_processconfigsresource5"
] | [] | BSD 3-Clause License | 2,494 | 770 | [
"longbow/applications.py",
"longbow/configuration.py"
] |
|
conan-io__conan-2883 | ed6f652b917dd973a92e91866681e3663b2e04f2 | 2018-05-10 14:23:08 | c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759 | diff --git a/conans/client/build/cmake.py b/conans/client/build/cmake.py
index 2702d2b9e..3a2494873 100644
--- a/conans/client/build/cmake.py
+++ b/conans/client/build/cmake.py
@@ -115,8 +115,11 @@ class CMake(object):
return os.environ["CONAN_CMAKE_GENERATOR"]
if not self._compiler or not self._compiler_version or not self._arch:
- raise ConanException("You must specify compiler, compiler.version and arch in "
- "your settings to use a CMake generator")
+ if self._os_build == "Windows":
+ raise ConanException("You must specify compiler, compiler.version and arch in "
+ "your settings to use a CMake generator. You can also declare "
+ "the env variable CONAN_CMAKE_GENERATOR.")
+ return "Unix Makefiles"
if self._compiler == "Visual Studio":
_visuals = {'8': '8 2005',
| Settings friction review
I've been experimenting with non-common settings for embedded devices and I've found a couple of issues:
1. We have to review why the CMake() build helper is raising in case it doesn't have compiler or architecture. It is very very uncomfortable to specify the settings and deleting them later for the package_id just to avoid the error. And it makes no sense.
2. If I install a `conanfile.txt` but without specifying `os` setting, it is crashing in the settings.py validate() method. Because somehow it has the os in the fields but not in the data.
| conan-io/conan | diff --git a/conans/test/build_helpers/cmake_test.py b/conans/test/build_helpers/cmake_test.py
index 85d1ba227..ff1802915 100644
--- a/conans/test/build_helpers/cmake_test.py
+++ b/conans/test/build_helpers/cmake_test.py
@@ -773,6 +773,29 @@ build_type: [ Release]
cmake = CMake(conan_file)
self.assertIn('-T "v140"', cmake.command_line)
+
+ def test_missing_settings(self):
+ def instance_with_os_build(os_build):
+ settings = Settings.loads(default_settings_yml)
+ settings.os_build = os_build
+ conan_file = ConanFileMock()
+ conan_file.settings = settings
+ return CMake(conan_file)
+
+ cmake = instance_with_os_build("Linux")
+ self.assertEquals(cmake.generator, "Unix Makefiles")
+
+ cmake = instance_with_os_build("Macos")
+ self.assertEquals(cmake.generator, "Unix Makefiles")
+
+ with self.assertRaisesRegexp(ConanException, "You must specify compiler, "
+ "compiler.version and arch"):
+ instance_with_os_build("Windows")
+
+ with tools.environment_append({"CONAN_CMAKE_GENERATOR": "MyCoolGenerator"}):
+ cmake = instance_with_os_build("Windows")
+ self.assertEquals(cmake.generator, "MyCoolGenerator")
+
def test_cmake_system_version_android(self):
with tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "SomeSystem",
"CONAN_CMAKE_GENERATOR": "SomeGenerator"}):
diff --git a/conans/test/integration/cmake_flags_test.py b/conans/test/integration/cmake_flags_test.py
index a74a320ba..2af97aeff 100644
--- a/conans/test/integration/cmake_flags_test.py
+++ b/conans/test/integration/cmake_flags_test.py
@@ -254,10 +254,13 @@ class MyLib(ConanFile):
client = TestClient()
client.save({"conanfile.py": conanfile % settings_line})
client.run("install .")
- client.run("build .", ignore_error=True)
-
- self.assertIn("You must specify compiler, compiler.version and arch in "
- "your settings to use a CMake generator", client.user_io.out,)
+ error = client.run("build .", ignore_error=True)
+ if platform.system() == "Windows":
+ self.assertTrue(error)
+ self.assertIn("You must specify compiler, compiler.version and arch in "
+ "your settings to use a CMake generator", client.user_io.out,)
+ else:
+ self.assertFalse(error)
def cmake_shared_flag_test(self):
conanfile = """
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 1
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc",
"apt-get install -y cmake",
"apt-get install -y golang"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@ed6f652b917dd973a92e91866681e3663b2e04f2#egg=conan
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
execnet==1.9.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- execnet==1.9.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_missing_settings"
] | [] | [
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_clean_sh_path",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cmake_system_version_android",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cores_ancient_visual",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_deprecated_behaviour",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_run_tests",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_shared",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_sysroot",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_verbose"
] | [] | MIT License | 2,496 | 242 | [
"conans/client/build/cmake.py"
] |
|
EdinburghGenomics__pyclarity-lims-33 | d73f5b7d76f0d65b4fe51fbc80e4bf9f49903a6c | 2018-05-11 11:29:17 | a03be6eda34f0d8adaf776d2286198a34e40ecf5 | diff --git a/pyclarity_lims/lims.py b/pyclarity_lims/lims.py
index c00b1a1..532b315 100644
--- a/pyclarity_lims/lims.py
+++ b/pyclarity_lims/lims.py
@@ -210,7 +210,8 @@ class Lims(object):
root = ElementTree.fromstring(response.content)
return root
- def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, add_info=False):
+ def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, nb_pages=-1,
+ add_info=False):
"""Get a list of udfs, filtered by keyword arguments.
:param name: name of udf
@@ -218,7 +219,9 @@ class Lims(object):
Sample, Project, Container, or the name of a process.
:param attach_to_category: If 'attach_to_name' is the name of a process, such as 'CaliperGX QC (DNA)',
then you need to set attach_to_category='ProcessType'. Must not be provided otherwise.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
@@ -226,21 +229,23 @@ class Lims(object):
attach_to_name=attach_to_name,
attach_to_category=attach_to_category,
start_index=start_index)
- return self._get_instances(Udfconfig, add_info=add_info, params=params)
+ return self._get_instances(Udfconfig, add_info=add_info, nb_pages=nb_pages, params=params)
- def get_reagent_types(self, name=None, start_index=None):
+ def get_reagent_types(self, name=None, start_index=None, nb_pages=-1):
"""
Get a list of reagent types, filtered by keyword arguments.
:param name: Reagent type name, or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
"""
params = self._get_params(name=name,
start_index=start_index)
- return self._get_instances(ReagentType, params=params)
+ return self._get_instances(ReagentType, nb_pages=nb_pages, params=params)
def get_labs(self, name=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None, add_info=False):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1, add_info=False):
"""Get a list of labs, filtered by keyword arguments.
:param name: Lab name, or list of names.
@@ -249,7 +254,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
@@ -257,11 +264,11 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Lab, add_info=add_info, params=params)
+ return self._get_instances(Lab, add_info=add_info, nb_pages=nb_pages, params=params)
def get_researchers(self, firstname=None, lastname=None, username=None,
last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of researchers, filtered by keyword arguments.
@@ -273,7 +280,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
@@ -284,10 +293,10 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Researcher, add_info=add_info, params=params)
+ return self._get_instances(Researcher, add_info=add_info, nb_pages=nb_pages, params=params)
def get_projects(self, name=None, open_date=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of projects, filtered by keyword arguments.
@@ -298,7 +307,9 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
@@ -308,14 +319,16 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Project, add_info=add_info, params=params)
+ return self._get_instances(Project, add_info=add_info, nb_pages=nb_pages, params=params)
def get_sample_number(self, name=None, projectname=None, projectlimsid=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""
Gets the number of samples matching the query without fetching every
sample, so it should be faster than len(get_samples())
"""
+ # TODO: I doubt that this make any difference in terms of speed since the only thing it save is the Sample
+ # construction. We should test and a replace with len(get_samples())
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
@@ -331,7 +344,7 @@ class Lims(object):
return total
def get_samples(self, name=None, projectname=None, projectlimsid=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""Get a list of samples, filtered by keyword arguments.
:param name: Sample name, or list of names.
@@ -341,21 +354,22 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
-
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
"""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Sample, params=params)
+ return self._get_instances(Sample, nb_pages=nb_pages, params=params)
def get_artifacts(self, name=None, type=None, process_type=None,
artifact_flag_name=None, working_flag=None, qc_flag=None,
sample_name=None, samplelimsid=None, artifactgroup=None, containername=None,
containerlimsid=None, reagent_label=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
resolve=False):
"""Get a list of artifacts, filtered by keyword arguments.
@@ -375,9 +389,10 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param resolve: Send a batch query to the lims to get the content of all artifacts retrieved
-
"""
params = self._get_params(name=name,
type=type,
@@ -394,13 +409,13 @@ class Lims(object):
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
if resolve:
- return self.get_batch(self._get_instances(Artifact, params=params))
+ return self.get_batch(self._get_instances(Artifact, nb_pages=nb_pages, params=params))
else:
- return self._get_instances(Artifact, params=params)
+ return self._get_instances(Artifact, nb_pages=nb_pages, params=params)
def get_containers(self, name=None, type=None,
state=None, last_modified=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None,
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1,
add_info=False):
"""Get a list of containers, filtered by keyword arguments.
@@ -412,10 +427,11 @@ class Lims(object):
:param udtname: UDT name, or list of names.
:param udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
-
"""
params = self._get_params(name=name,
type=type,
@@ -423,24 +439,25 @@ class Lims(object):
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Container, add_info=add_info, params=params)
+ return self._get_instances(Container, add_info=add_info, nb_pages=nb_pages, params=params)
- def get_container_types(self, name=None, start_index=None, add_info=False):
+ def get_container_types(self, name=None, start_index=None, nb_pages=-1, add_info=False):
"""Get a list of container types, filtered by keyword arguments.
:param name: name of the container type or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
-
"""
params = self._get_params(name=name, start_index=start_index)
- return self._get_instances(Containertype, add_info=add_info, params=params)
+ return self._get_instances(Containertype, add_info=add_info, nb_pages=nb_pages, params=params)
def get_processes(self, last_modified=None, type=None,
inputartifactlimsid=None,
techfirstname=None, techlastname=None, projectname=None,
- udf=dict(), udtname=None, udt=dict(), start_index=None):
+ udf=dict(), udtname=None, udt=dict(), start_index=None, nb_pages=-1):
"""Get a list of processes, filtered by keyword arguments.
:param last_modified: Since the given ISO format datetime.
@@ -453,7 +470,9 @@ class Lims(object):
:param techfirstname: First name of researcher, or list of.
:param techlastname: Last name of researcher, or list of.
:param projectname: Name of project, or list of.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
"""
params = self._get_params(last_modified=last_modified,
type=type,
@@ -463,7 +482,7 @@ class Lims(object):
projectname=projectname,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
- return self._get_instances(Process, params=params)
+ return self._get_instances(Process, nb_pages=nb_pages, params=params)
def get_workflows(self, name=None, add_info=False):
"""
@@ -513,32 +532,35 @@ class Lims(object):
params = self._get_params(name=name)
return self._get_instances(Protocol, add_info=add_info, params=params)
- def get_reagent_kits(self, name=None, start_index=None, add_info=False):
+ def get_reagent_kits(self, name=None, start_index=None, nb_pages=-1, add_info=False):
"""Get a list of reagent kits, filtered by keyword arguments.
:param name: reagent kit name, or list of names.
- :param start_index: Page to retrieve; all if None.
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
:param add_info: Change the return type to a tuple where the first element is normal return and
the second is a dict of additional information provided in the query.
"""
params = self._get_params(name=name,
start_index=start_index)
- return self._get_instances(ReagentKit, add_info=add_info, params=params)
+ return self._get_instances(ReagentKit, add_info=add_info, nb_pages=nb_pages, params=params)
def get_reagent_lots(self, name=None, kitname=None, number=None,
- start_index=None):
+ start_index=None, nb_pages=-1):
"""Get a list of reagent lots, filtered by keyword arguments.
:param name: reagent kit name, or list of names.
:param kitname: name of the kit this lots belong to
:param number: lot number or list of lot number
- :param start_index: Page to retrieve; all if None.
-
+ :param start_index: first element to retrieve; start at first element if None.
+ :param nb_pages: number of page to iterate over. The page size is 500 by default unless configured otherwise
+ in your LIMS. 0 or negative numbers returns all pages.
"""
params = self._get_params(name=name, kitname=kitname, number=number,
start_index=start_index)
- return self._get_instances(ReagentLot, params=params)
+ return self._get_instances(ReagentLot, nb_pages=nb_pages, params=params)
def _get_params(self, **kwargs):
"""Convert keyword arguments to a kwargs dictionary."""
@@ -560,14 +582,15 @@ class Lims(object):
result["udt.%s" % key] = value
return result
- def _get_instances(self, klass, add_info=None, params=dict()):
+ def _get_instances(self, klass, add_info=None, nb_pages=-1, params=dict()):
results = []
additionnal_info_dicts = []
tag = klass._TAG
if tag is None:
tag = klass.__name__.lower()
root = self.get(self.get_uri(klass._URI), params=params)
- while params.get('start-index') is None: # Loop over all pages.
+ while root: # Loop over all requested pages.
+ nb_pages -= 1
for node in root.findall(tag):
results.append(klass(self, uri=node.attrib['uri']))
info_dict = {}
@@ -577,9 +600,10 @@ class Lims(object):
info_dict[subnode.tag] = subnode.text
additionnal_info_dicts.append(info_dict)
node = root.find('next-page')
- if node is None:
- break
- root = self.get(node.attrib['uri'], params=params)
+ if node is None or nb_pages == 0:
+ root = None
+ else:
+ root = self.get(node.attrib['uri'], params=params)
if add_info:
return results, additionnal_info_dicts
else:
| Lims _get_instances() returns empty array when start_index is set.
An empty array is always returned when retrieving a list of entities using the `get_*` methods of the`Lims` class. For example:
```
samples = l.get_samples(start_index=500)
# samples == []
```
The problem is in the [_get_instances()](https://github.com/EdinburghGenomics/pyclarity-lims/blob/master/pyclarity_lims/lims.py#L563-L586) method. The response is only parsed when `start_index` is `None`. | EdinburghGenomics/pyclarity-lims | diff --git a/tests/test_lims.py b/tests/test_lims.py
index 58fe1a7..0bd29c5 100644
--- a/tests/test_lims.py
+++ b/tests/test_lims.py
@@ -1,5 +1,7 @@
from unittest import TestCase
from requests.exceptions import HTTPError
+
+from pyclarity_lims.entities import Sample
from pyclarity_lims.lims import Lims
try:
callable(1)
@@ -143,3 +145,63 @@ class TestLims(TestCase):
assert lims.get_file_contents(id='an_id', encoding='utf-16', crlf=True) == 'some data\n'
assert lims.request_session.get.return_value.encoding == 'utf-16'
lims.request_session.get.assert_called_with(exp_url, auth=(self.username, self.password), timeout=16)
+
+ def test_get_instances(self):
+ lims = Lims(self.url, username=self.username, password=self.password)
+ sample_xml_template = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+ <smp:samples xmlns:smp="http://pyclarity_lims.com/ri/sample">
+ <sample uri="{url}/api/v2/samples/{s1}" limsid="{s1}"/>
+ <sample uri="{url}/api/v2/samples/{s2}" limsid="{s2}"/>
+ {next_page}
+ </smp:samples>
+ """
+ sample_xml1 = sample_xml_template.format(
+ s1='sample1', s2='sample2',
+ url=self.url,
+ next_page='<next-page uri="{url}/api/v2/samples?start-index=3"/>'.format(url=self.url)
+ )
+ sample_xml2 = sample_xml_template.format(
+ s1='sample3', s2='sample4',
+ url=self.url,
+ next_page='<next-page uri="{url}/api/v2/samples?start-index=5"/>'.format(url=self.url)
+ )
+ sample_xml3 = sample_xml_template.format(
+ s1='sample5', s2='sample6',
+ url=self.url,
+ next_page=''
+ )
+ get_returns = [
+ Mock(content=sample_xml1, status_code=200),
+ Mock(content=sample_xml2, status_code=200),
+ Mock(content=sample_xml3, status_code=200)
+ ]
+
+ with patch('requests.Session.get', side_effect=get_returns) as mget:
+ samples = lims._get_instances(Sample, nb_pages=2, params={'projectname': 'p1'})
+ assert len(samples) == 4
+ assert mget.call_count == 2
+ mget.assert_any_call(
+ 'http://testgenologics.com:4040/api/v2/samples',
+ auth=('test', 'password'),
+ headers={'accept': 'application/xml'},
+ params={'projectname': 'p1'},
+ timeout=16
+ )
+ mget.assert_called_with(
+ 'http://testgenologics.com:4040/api/v2/samples?start-index=3',
+ auth=('test', 'password'),
+ headers={'accept': 'application/xml'},
+ params={'projectname': 'p1'},
+ timeout=16
+ )
+
+ with patch('requests.Session.get', side_effect=get_returns) as mget:
+ samples = lims._get_instances(Sample, nb_pages=0)
+ assert len(samples) == 6
+ assert mget.call_count == 3
+
+ with patch('requests.Session.get', side_effect=get_returns) as mget:
+ samples = lims._get_instances(Sample, nb_pages=-1)
+ assert len(samples) == 6
+ assert mget.call_count == 3
+
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
charset-normalizer==2.0.12
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
packaging==21.3
pluggy==1.0.0
py==1.11.0
-e git+https://github.com/EdinburghGenomics/pyclarity-lims.git@d73f5b7d76f0d65b4fe51fbc80e4bf9f49903a6c#egg=pyclarity_lims
pyparsing==3.1.4
pytest==7.0.1
requests==2.27.1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
zipp==3.6.0
| name: pyclarity-lims
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- charset-normalizer==2.0.12
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- requests==2.27.1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- zipp==3.6.0
prefix: /opt/conda/envs/pyclarity-lims
| [
"tests/test_lims.py::TestLims::test_get_instances"
] | [] | [
"tests/test_lims.py::TestLims::test_get",
"tests/test_lims.py::TestLims::test_get_file_contents",
"tests/test_lims.py::TestLims::test_get_uri",
"tests/test_lims.py::TestLims::test_parse_response",
"tests/test_lims.py::TestLims::test_post",
"tests/test_lims.py::TestLims::test_put",
"tests/test_lims.py::TestLims::test_route_artifact",
"tests/test_lims.py::TestLims::test_tostring",
"tests/test_lims.py::TestLims::test_upload_new_file"
] | [] | MIT License | 2,502 | 4,714 | [
"pyclarity_lims/lims.py"
] |
|
google__mobly-444 | 8caa5c387b2df47a180e0349fbebe7838b099b83 | 2018-05-11 15:02:16 | 95286a01a566e056d44acfa9577a45bc7f37f51d | diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 12c14bd..cb5b6b6 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -237,7 +237,8 @@ class AdbProxy(object):
def forward(self, args=None, shell=False):
with ADB_PORT_LOCK:
- return self._exec_adb_cmd('forward', args, shell, timeout=None)
+ return self._exec_adb_cmd(
+ 'forward', args, shell, timeout=None, stderr=None)
def instrument(self, package, options=None, runner=None):
"""Runs an instrumentation command on the device.
| `current_test_info` should exist between `setup_class` and `setup_test`
Right now `current_test_info` is None between `setup_class` and `setup_test`, which makes it difficult to use this field consistently.
E.g. if a test relies on this field in `on_fail`, if `setup_class` fails, the logic in `on_fail` would raise an exception for any call to `current_test_info`. | google/mobly | diff --git a/mobly/base_test.py b/mobly/base_test.py
index e4e047b..13a79b0 100644
--- a/mobly/base_test.py
+++ b/mobly/base_test.py
@@ -624,6 +624,10 @@ class BaseTestClass(object):
tests = self._get_test_methods(test_names)
try:
# Setup for the class.
+ class_record = records.TestResultRecord('setup_class', self.TAG)
+ class_record.test_begin()
+ self.current_test_info = runtime_test_info.RuntimeTestInfo(
+ 'setup_class', self.log_path, class_record)
try:
self._setup_class()
except signals.TestAbortSignal:
@@ -633,9 +637,6 @@ class BaseTestClass(object):
# Setup class failed for unknown reasons.
# Fail the class and skip all tests.
logging.exception('Error in setup_class %s.', self.TAG)
- class_record = records.TestResultRecord(
- 'setup_class', self.TAG)
- class_record.test_begin()
class_record.test_error(e)
self._exec_procedure_func(self._on_fail, class_record)
self.results.add_class_error(class_record)
diff --git a/mobly/runtime_test_info.py b/mobly/runtime_test_info.py
index f4eea99..57b0742 100644
--- a/mobly/runtime_test_info.py
+++ b/mobly/runtime_test_info.py
@@ -19,10 +19,13 @@ from mobly import utils
class RuntimeTestInfo(object):
- """Container class for runtime information of a test.
+ """Container class for runtime information of a test or test stage.
One object corresponds to one test. This is meant to be a read-only class.
+ This also applies to test stages like `setup_class`, which has its own
+ runtime info but is not part of any single test.
+
Attributes:
name: string, name of the test.
signature: string, an identifier of the test, a combination of test
diff --git a/tests/mobly/base_test_test.py b/tests/mobly/base_test_test.py
index d78a640..a38b532 100755
--- a/tests/mobly/base_test_test.py
+++ b/tests/mobly/base_test_test.py
@@ -91,6 +91,25 @@ class BaseTestTest(unittest.TestCase):
self.assertIsNone(actual_record.details)
self.assertIsNone(actual_record.extras)
+ def test_current_test_info_in_setup_class(self):
+ class MockBaseTest(base_test.BaseTestClass):
+ def setup_class(self):
+ asserts.assert_true(
+ self.current_test_info.name == 'setup_class',
+ 'Got unexpected test name %s.' %
+ self.current_test_info.name)
+ output_path = self.current_test_info.output_path
+ asserts.assert_true(
+ os.path.exists(output_path), 'test output path missing')
+ raise Exception(MSG_EXPECTED_EXCEPTION)
+
+ bt_cls = MockBaseTest(self.mock_test_cls_configs)
+ bt_cls.run()
+ actual_record = bt_cls.results.error[0]
+ self.assertEqual(actual_record.test_name, 'setup_class')
+ self.assertEqual(actual_record.details, MSG_EXPECTED_EXCEPTION)
+ self.assertIsNone(actual_record.extras)
+
def test_self_tests_list(self):
class MockBaseTest(base_test.BaseTestClass):
def __init__(self, controllers):
diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py
index 7bf61ab..cf699ce 100755
--- a/tests/mobly/controllers/android_device_lib/adb_test.py
+++ b/tests/mobly/controllers/android_device_lib/adb_test.py
@@ -173,6 +173,10 @@ class AdbTest(unittest.TestCase):
self.assertEqual(MOCK_DEFAULT_STDERR,
stderr_redirect.getvalue().decode('utf-8'))
+ def test_forward(self):
+ with mock.patch.object(adb.AdbProxy, '_exec_cmd') as mock_exec_cmd:
+ adb.AdbProxy().forward(MOCK_SHELL_COMMAND)
+
def test_instrument_without_parameters(self):
"""Verifies the AndroidDevice object's instrument command is correct in
the basic case.
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 3
},
"num_modified_files": 1
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.7",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi @ file:///croot/certifi_1671487769961/work/certifi
exceptiongroup==1.2.2
future==1.0.0
importlib-metadata==6.7.0
iniconfig==2.0.0
-e git+https://github.com/google/mobly.git@8caa5c387b2df47a180e0349fbebe7838b099b83#egg=mobly
mock==1.0.1
packaging==24.0
pluggy==1.2.0
portpicker==1.6.0
psutil==7.0.0
pyserial==3.5
pytest==7.4.4
pytz==2025.2
PyYAML==6.0.1
timeout-decorator==0.5.0
tomli==2.0.1
typing_extensions==4.7.1
zipp==3.15.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2022.12.7=py37h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=22.3.1=py37h06a4308_0
- python=3.7.16=h7a1cb2a_0
- readline=8.2=h5eee18b_0
- setuptools=65.6.3=py37h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.38.4=py37h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- future==1.0.0
- importlib-metadata==6.7.0
- iniconfig==2.0.0
- mock==1.0.1
- packaging==24.0
- pluggy==1.2.0
- portpicker==1.6.0
- psutil==7.0.0
- pyserial==3.5
- pytest==7.4.4
- pytz==2025.2
- pyyaml==6.0.1
- timeout-decorator==0.5.0
- tomli==2.0.1
- typing-extensions==4.7.1
- zipp==3.15.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_forward"
] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_write_user_data"
] | [
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_on_fail_from_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_setup_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_all_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_on_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_setup_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_in_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_abort_class_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_fail_with_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_equal_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_fail_with_wrong_regex",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_noop",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_fail_with_wrong_error",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_raises_regex_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_both_teardown_and_test_body_raise_exceptions",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_cli_test_selection_override_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_info_in_setup_class",
"tests/mobly/base_test_test.py::BaseTestTest::test_current_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_default_execution_of_all_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_exception_objects_in_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_equal",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_false",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_in_teardown_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_multiple_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_op",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_custom_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_no_raises_default_msg",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_true_and_assert_true",
"tests/mobly/base_test_test.py::BaseTestTest::test_expect_two_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_explicit_pass_but_teardown_test_raises_an_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_in_procedure_functions_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_failure_to_call_procedure_function_is_recorded",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_call_outside_of_setup_generated_tests",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_dup_test_name",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_generate_tests_selected_run",
"tests/mobly/base_test_test.py::BaseTestTest::test_implicit_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_missing_requested_test_func",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_both_test_and_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_teardown_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_executed_if_test_setup_fails_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_fail_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_cannot_modify_original_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_on_pass_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_procedure_function_gets_correct_record",
"tests/mobly/base_test_test.py::BaseTestTest::test_promote_extra_errors_to_termination_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_self_tests_list_fail_by_convention",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_and_teardown_execution_count",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_setup_test_fail_by_test_signal",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_if",
"tests/mobly/base_test_test.py::BaseTestTest::test_skip_in_setup_test",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_class_fail_by_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_assert_fail",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_setup_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_fails",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_executed_if_test_pass",
"tests/mobly/base_test_test.py::BaseTestTest::test_teardown_test_raise_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_uncaught_exception",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_basic",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_None",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_optional_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_default_overwrite_by_required_param_list",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_missing",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_optional_with_default",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required",
"tests/mobly/base_test_test.py::BaseTestTest::test_unpack_userparams_required_missing",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_cli_cmd_to_string",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_stderr_pipe",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_called_correctly",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_existing_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_newer_devices",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_older_devices",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters"
] | [] | Apache License 2.0 | 2,505 | 182 | [
"mobly/controllers/android_device_lib/adb.py"
] |
|
python-useful-helpers__exec-helpers-38 | 63166d1ac340be47d64488a5b84a9d6fa317e8fe | 2018-05-14 10:14:06 | 814d435b7eda2b00fa1559d5a94103f1e888ab52 | diff --git a/exec_helpers/_api.py b/exec_helpers/_api.py
index 3283474..55c32ad 100644
--- a/exec_helpers/_api.py
+++ b/exec_helpers/_api.py
@@ -33,7 +33,6 @@ from exec_helpers import constants
from exec_helpers import exceptions
from exec_helpers import exec_result # noqa # pylint: disable=unused-import
from exec_helpers import proc_enums
-from exec_helpers import _log_templates
_type_exit_codes = typing.Union[int, proc_enums.ExitCodes]
_type_expected = typing.Optional[typing.Iterable[_type_exit_codes]]
@@ -249,7 +248,7 @@ class ExecHelper(object):
verbose=verbose,
**kwargs
)
- message = _log_templates.CMD_RESULT.format(result=result)
+ message = "Command {result.cmd!r} exit code: {result.exit_code!s}".format(result=result)
self.logger.log(
level=logging.INFO if verbose else logging.DEBUG,
msg=message
@@ -292,7 +291,8 @@ class ExecHelper(object):
ret = self.execute(command, verbose, timeout, **kwargs)
if ret['exit_code'] not in expected:
message = (
- _log_templates.CMD_UNEXPECTED_EXIT_CODE.format(
+ "{append}Command {result.cmd!r} returned exit code "
+ "{result.exit_code!s} while expected {expected!s}".format(
append=error_info + '\n' if error_info else '',
result=ret,
expected=expected
@@ -339,7 +339,8 @@ class ExecHelper(object):
error_info=error_info, raise_on_err=raise_on_err, **kwargs)
if ret['stderr']:
message = (
- _log_templates.CMD_UNEXPECTED_STDERR.format(
+ "{append}Command {result.cmd!r} STDERR while not expected\n"
+ "\texit code: {result.exit_code!s}".format(
append=error_info + '\n' if error_info else '',
result=ret,
))
diff --git a/exec_helpers/_log_templates.py b/exec_helpers/_log_templates.py
index d3f8c3b..56cda0f 100644
--- a/exec_helpers/_log_templates.py
+++ b/exec_helpers/_log_templates.py
@@ -20,18 +20,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
-CMD_EXEC = "Executing command:\n{cmd!s}\n"
-CMD_RESULT = "Command exit code '{result.exit_code!s}':\n{result.cmd}\n"
-CMD_UNEXPECTED_EXIT_CODE = (
- "{append}Command '{result.cmd}' returned exit code '{result.exit_code!s}' "
- "while expected '{expected!s}'"
-)
-CMD_UNEXPECTED_STDERR = (
- "{append}Command '{result.cmd}' STDERR while not expected\n"
- "\texit code: '{result.exit_code!s}'"
-)
+CMD_EXEC = "Executing command:\n{cmd!r}\n"
+
CMD_WAIT_ERROR = (
- "Wait for '{result.cmd}' during {timeout!s}s: no return code!\n"
+ "Wait for {result.cmd!r} during {timeout!s}s: no return code!\n"
'\tSTDOUT:\n'
'{result.stdout_brief}\n'
'\tSTDERR"\n'
diff --git a/exec_helpers/_ssh_client_base.py b/exec_helpers/_ssh_client_base.py
index 8ac7d88..e30ca28 100644
--- a/exec_helpers/_ssh_client_base.py
+++ b/exec_helpers/_ssh_client_base.py
@@ -50,7 +50,6 @@ from exec_helpers import _log_templates
__all__ = ('SSHClientBase', )
-logger = logging.getLogger(__name__)
logging.getLogger('paramiko').setLevel(logging.WARNING)
logging.getLogger('iso8601').setLevel(logging.WARNING)
@@ -59,9 +58,6 @@ _type_ConnectSSH = typing.Union[
]
_type_RSAKeys = typing.Iterable[paramiko.RSAKey]
_type_exit_codes = typing.Union[int, proc_enums.ExitCodes]
-_type_multiple_results = typing.Dict[
- typing.Tuple[str, int], exec_result.ExecResult
-]
_type_execute_async = typing.Tuple[
paramiko.Channel,
paramiko.ChannelFile,
@@ -149,7 +145,7 @@ class _MemorizedSSH(type):
try:
ssh.execute('cd ~', timeout=5)
except BaseException: # Note: Do not change to lower level!
- logger.debug('Reconnect {}'.format(ssh))
+ ssh.logger.debug('Reconnect')
ssh.reconnect()
return ssh
if (
@@ -158,7 +154,7 @@ class _MemorizedSSH(type):
): # pragma: no cover
# If we have only cache reference and temporary getrefcount
# reference: close connection before deletion
- logger.debug('Closing {} as unused'.format(cls.__cache[key]))
+ cls.__cache[key].logger.debug('Closing as unused')
cls.__cache[key].close()
del cls.__cache[key]
# noinspection PyArgumentList
@@ -186,7 +182,7 @@ class _MemorizedSSH(type):
CPYTHON and
sys.getrefcount(ssh) == n_count
): # pragma: no cover
- logger.debug('Closing {} as unused'.format(ssh))
+ ssh.logger.debug('Closing as unused')
ssh.close()
mcs.__cache = {}
@@ -306,7 +302,9 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
.. note:: auth has priority over username/password/private_keys
"""
super(SSHClientBase, self).__init__(
- logger=logger.getChild(
+ logger=logging.getLogger(
+ self.__class__.__name__
+ ).getChild(
'{host}:{port}'.format(host=host, port=port)
),
)
@@ -376,7 +374,7 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
auth=self.auth
)
- def __str__(self):
+ def __str__(self): # pragma: no cover
"""Representation for debug purposes."""
return '{cls}(host={host}, port={port}) for user {user}'.format(
cls=self.__class__.__name__, host=self.hostname, port=self.port,
@@ -832,7 +830,7 @@ class SSHClientBase(six.with_metaclass(_MemorizedSSH, _api.ExecHelper)):
expected=None, # type: typing.Optional[typing.Iterable[int]]
raise_on_err=True, # type: bool
**kwargs
- ): # type: (...) -> _type_multiple_results
+ ): # type: (...) -> typing.Dict[typing.Tuple[str, int], exec_result.ExecResult]
"""Execute command on multiple remotes in async mode.
:param remotes: Connections to execute on
diff --git a/exec_helpers/ssh_auth.py b/exec_helpers/ssh_auth.py
index 7ab9f98..63ca33b 100644
--- a/exec_helpers/ssh_auth.py
+++ b/exec_helpers/ssh_auth.py
@@ -187,9 +187,7 @@ class SSHAuth(object):
logger.exception('No password has been set!')
raise
else:
- logger.critical(
- 'Unexpected PasswordRequiredException, '
- 'when password is set!')
+ logger.critical('Unexpected PasswordRequiredException, when password is set!')
raise
except (paramiko.AuthenticationException,
paramiko.BadHostKeyException):
| exec_helpers._ssh_client_base is incorrect logger prefix from private section
Should be `exec_helpers.ssh_client` | python-useful-helpers/exec-helpers | diff --git a/test/test_ssh_client.py b/test/test_ssh_client.py
index 3a3c4a3..9119773 100644
--- a/test/test_ssh_client.py
+++ b/test/test_ssh_client.py
@@ -53,7 +53,7 @@ port = 22
username = 'user'
password = 'pass'
command = 'ls ~\nline 2\nline 3\nline с кирилицей'
-command_log = u"Executing command:\n{!s}\n".format(command.rstrip())
+command_log = u"Executing command:\n{!r}\n".format(command.rstrip())
stdout_list = [b' \n', b'2\n', b'3\n', b' \n']
stdout_str = b''.join(stdout_list).strip().decode('utf-8')
stderr_list = [b' \n', b'0\n', b'1\n', b' \n']
@@ -64,7 +64,7 @@ encoded_cmd = base64.b64encode(
print_stdin = 'read line; echo "$line"'
[email protected]('exec_helpers._ssh_client_base.logger', autospec=True)
[email protected]('logging.getLogger', autospec=True)
@mock.patch('paramiko.AutoAddPolicy', autospec=True, return_value='AutoAddPolicy')
@mock.patch('paramiko.SSHClient', autospec=True)
class TestExecute(unittest.TestCase):
@@ -89,8 +89,7 @@ class TestExecute(unittest.TestCase):
@staticmethod
def gen_cmd_result_log_message(result):
- return (u"Command exit code '{code!s}':\n{cmd!s}\n"
- .format(cmd=result.cmd.rstrip(), code=result.exit_code))
+ return u"Command {result.cmd!r} exit code: {result.exit_code!s}".format(result=result)
def test_001_execute_async(self, client, policy, logger):
chan = mock.Mock()
@@ -116,7 +115,8 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(command))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ # raise ValueError(logger.mock_calls)
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -151,7 +151,7 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(command))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -235,7 +235,7 @@ class TestExecute(unittest.TestCase):
"sudo -S bash -c '"
"eval \"$(base64 -d <(echo \"{0}\"))\"'".format(encoded_cmd))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -271,7 +271,7 @@ class TestExecute(unittest.TestCase):
"sudo -S bash -c '"
"eval \"$(base64 -d <(echo \"{0}\"))\"'".format(encoded_cmd))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -303,7 +303,7 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(command))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -335,7 +335,7 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(command))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -380,7 +380,7 @@ class TestExecute(unittest.TestCase):
"sudo -S bash -c '"
"eval \"$(base64 -d <(echo \"{0}\"))\"'".format(encoded_cmd))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=command_log),
log.mock_calls
@@ -410,7 +410,7 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(command))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.INFO, msg=command_log),
log.mock_calls
@@ -420,7 +420,7 @@ class TestExecute(unittest.TestCase):
cmd = "USE='secret=secret_pass' do task"
log_mask_re = r"secret\s*=\s*([A-Z-a-z0-9_\-]+)"
masked_cmd = "USE='secret=<*masked*>' do task"
- cmd_log = u"Executing command:\n{!s}\n".format(masked_cmd)
+ cmd_log = u"Executing command:\n{!r}\n".format(masked_cmd)
chan = mock.Mock()
open_session = mock.Mock(return_value=chan)
@@ -445,7 +445,7 @@ class TestExecute(unittest.TestCase):
mock.call.makefile_stderr('rb'),
mock.call.exec_command('{}\n'.format(cmd))
))
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
self.assertIn(
mock.call.log(level=logging.DEBUG, msg=cmd_log),
log.mock_calls
@@ -620,7 +620,7 @@ class TestExecute(unittest.TestCase):
open_session.assert_called_once()
stdin.assert_not_called()
- log = logger.getChild('{host}:{port}'.format(host=host, port=port))
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port))
log.warning.assert_called_once_with('STDIN Send failed: closed channel')
self.assertIn(chan, result)
@@ -777,7 +777,7 @@ class TestExecute(unittest.TestCase):
execute_async.assert_called_once_with(command, verbose=False)
chan.assert_has_calls((mock.call.status_event.is_set(), ))
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(
@@ -824,7 +824,7 @@ class TestExecute(unittest.TestCase):
chan.assert_has_calls((mock.call.status_event.is_set(), ))
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(
@@ -872,7 +872,7 @@ class TestExecute(unittest.TestCase):
execute_async.assert_called_once_with(
command, verbose=False, open_stdout=False)
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(
@@ -916,7 +916,7 @@ class TestExecute(unittest.TestCase):
execute_async.assert_called_once_with(
command, verbose=False, open_stderr=False)
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(
@@ -968,7 +968,7 @@ class TestExecute(unittest.TestCase):
open_stderr=False
)
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(level=logging.DEBUG, msg=message),
@@ -1003,7 +1003,7 @@ class TestExecute(unittest.TestCase):
execute_async.assert_called_once_with(command, verbose=False)
chan.assert_has_calls((mock.call.status_event.is_set(), ))
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
self.assertIn(
mock.call(level=logging.DEBUG, msg=message),
log.mock_calls
@@ -1069,7 +1069,7 @@ class TestExecute(unittest.TestCase):
cmd, log_mask_re=log_mask_re, verbose=False)
chan.assert_has_calls((mock.call.status_event.is_set(),))
message = self.gen_cmd_result_log_message(result)
- log = logger.getChild('{host}:{port}'.format(host=host, port=port)).log
+ log = logger(ssh.__class__.__name__).getChild('{host}:{port}'.format(host=host, port=port)).log
log.assert_has_calls(
[
mock.call(
@@ -1297,7 +1297,7 @@ class TestExecute(unittest.TestCase):
error_info=None, raise_on_err=raise_on_err)
[email protected]('exec_helpers._ssh_client_base.logger', autospec=True)
[email protected]('logging.getLogger', autospec=True)
@mock.patch('paramiko.AutoAddPolicy', autospec=True, return_value='AutoAddPolicy')
@mock.patch('paramiko.SSHClient', autospec=True)
@mock.patch('paramiko.Transport', autospec=True)
@@ -1528,7 +1528,7 @@ class TestExecuteThrowHost(unittest.TestCase):
))
[email protected]('exec_helpers._ssh_client_base.logger', autospec=True)
[email protected]('logging.getLogger', autospec=True)
@mock.patch('paramiko.AutoAddPolicy', autospec=True, return_value='AutoAddPolicy')
@mock.patch('paramiko.SSHClient', autospec=True)
class TestSftp(unittest.TestCase):
diff --git a/test/test_ssh_client_init.py b/test/test_ssh_client_init.py
index 8117eff..a5f400d 100644
--- a/test/test_ssh_client_init.py
+++ b/test/test_ssh_client_init.py
@@ -381,9 +381,10 @@ class TestSSHClientInit(unittest.TestCase):
_ssh.attach_mock(mock.Mock(return_value=_sftp), 'open_sftp')
with mock.patch(
- 'exec_helpers._ssh_client_base.logger',
+ 'logging.getLogger',
autospec=True
- ) as ssh_logger:
+ ) as get_logger:
+ ssh_logger = get_logger(exec_helpers.SSHClient.__name__)
ssh = exec_helpers.SSHClient(
host=host,
@@ -408,13 +409,13 @@ class TestSSHClientInit(unittest.TestCase):
ssh.close()
- log = ssh_logger.getChild(
- '{host}:{port}'.format(host=host, port=port)
- )
- log.assert_has_calls((
- mock.call.exception('Could not close ssh connection'),
- mock.call.exception('Could not close sftp connection'),
- ))
+ log = ssh_logger.getChild(
+ '{host}:{port}'.format(host=host, port=port)
+ )
+ log.assert_has_calls((
+ mock.call.exception('Could not close ssh connection'),
+ mock.call.exception('Could not close sftp connection'),
+ ))
def test_014_init_reconnect(self, client, policy, logger):
"""Test reconnect
@@ -619,9 +620,10 @@ class TestSSHClientInit(unittest.TestCase):
client.return_value = _ssh
with mock.patch(
- 'exec_helpers._ssh_client_base.logger',
+ 'logging.getLogger',
autospec=True
- ) as ssh_logger:
+ ) as get_logger:
+ ssh_logger = get_logger(exec_helpers.SSHClient.__name__)
ssh = exec_helpers.SSHClient(
host=host, auth=exec_helpers.SSHAuth(password=password))
@@ -631,14 +633,14 @@ class TestSSHClientInit(unittest.TestCase):
# noinspection PyStatementEffect
ssh._sftp
# pylint: enable=pointless-statement
- log = ssh_logger.getChild(
- '{host}:{port}'.format(host=host, port=port)
- )
- log.assert_has_calls((
- mock.call.debug('SFTP is not connected, try to connect...'),
- mock.call.warning(
- 'SFTP enable failed! SSH only is accessible.'),
- ))
+ log = ssh_logger.getChild(
+ '{host}:{port}'.format(host=host, port=port)
+ )
+ log.assert_has_calls((
+ mock.call.debug('SFTP is not connected, try to connect...'),
+ mock.call.warning(
+ 'SFTP enable failed! SSH only is accessible.'),
+ ))
def test_022_init_sftp_repair(self, client, policy, logger):
_sftp = mock.Mock()
@@ -652,9 +654,10 @@ class TestSSHClientInit(unittest.TestCase):
client.return_value = _ssh
with mock.patch(
- 'exec_helpers._ssh_client_base.logger',
+ 'logging.getLogger',
autospec=True
- ) as ssh_logger:
+ ) as get_logger:
+ ssh_logger = get_logger(exec_helpers.SSHClient.__name__)
ssh = exec_helpers.SSHClient(
host=host, auth=exec_helpers.SSHAuth(password=password)
@@ -670,12 +673,12 @@ class TestSSHClientInit(unittest.TestCase):
sftp = ssh._sftp
self.assertEqual(sftp, open_sftp())
- log = ssh_logger.getChild(
- '{host}:{port}'.format(host=host, port=port)
- )
- log.assert_has_calls((
- mock.call.debug('SFTP is not connected, try to connect...'),
- ))
+ log = ssh_logger.getChild(
+ '{host}:{port}'.format(host=host, port=port)
+ )
+ log.assert_has_calls((
+ mock.call.debug('SFTP is not connected, try to connect...'),
+ ))
@mock.patch('exec_helpers.exec_result.ExecResult', autospec=True)
def test_023_init_memorize(
diff --git a/test/test_sshauth.py b/test/test_sshauth.py
index 0c4ce5f..60c1cfe 100644
--- a/test/test_sshauth.py
+++ b/test/test_sshauth.py
@@ -36,10 +36,7 @@ import exec_helpers
def gen_private_keys(amount=1):
- keys = []
- for _ in range(amount):
- keys.append(paramiko.RSAKey.generate(1024))
- return keys
+ return [paramiko.RSAKey.generate(1024) for _ in range(amount)]
def gen_public_key(private_key=None):
diff --git a/test/test_subprocess_runner.py b/test/test_subprocess_runner.py
index ef3449a..04555c7 100644
--- a/test/test_subprocess_runner.py
+++ b/test/test_subprocess_runner.py
@@ -32,7 +32,7 @@ import exec_helpers
from exec_helpers import subprocess_runner
command = 'ls ~\nline 2\nline 3\nline с кирилицей'
-command_log = u"Executing command:\n{!s}\n".format(command.rstrip())
+command_log = u"Executing command:\n{!r}\n".format(command.rstrip())
stdout_list = [b' \n', b'2\n', b'3\n', b' \n']
stderr_list = [b' \n', b'0\n', b'1\n', b' \n']
print_stdin = 'read line; echo "$line"'
@@ -105,8 +105,7 @@ class TestSubprocessRunner(unittest.TestCase):
@staticmethod
def gen_cmd_result_log_message(result):
- return ("Command exit code '{code!s}':\n{cmd!s}\n"
- .format(cmd=result.cmd.rstrip(), code=result.exit_code))
+ return u"Command {result.cmd!r} exit code: {result.exit_code!s}".format(result=result)
def test_001_call(
self,
@@ -369,7 +368,7 @@ class TestSubprocessRunner(unittest.TestCase):
cmd = "USE='secret=secret_pass' do task"
log_mask_re = r"secret\s*=\s*([A-Z-a-z0-9_\-]+)"
masked_cmd = "USE='secret=<*masked*>' do task"
- cmd_log = u"Executing command:\n{!s}\n".format(masked_cmd)
+ cmd_log = u"Executing command:\n{!r}\n".format(masked_cmd)
popen_obj, exp_result = self.prepare_close(
popen,
@@ -424,7 +423,7 @@ class TestSubprocessRunner(unittest.TestCase):
cmd = "USE='secret=secret_pass' do task"
log_mask_re = r"secret\s*=\s*([A-Z-a-z0-9_\-]+)"
masked_cmd = "USE='secret=<*masked*>' do task"
- cmd_log = u"Executing command:\n{!s}\n".format(masked_cmd)
+ cmd_log = u"Executing command:\n{!r}\n".format(masked_cmd)
popen_obj, exp_result = self.prepare_close(
popen,
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 4
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-html",
"mock"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | advanced-descriptors==4.0.3
bcrypt==4.3.0
cffi==1.17.1
coverage==7.8.0
cryptography==44.0.2
exceptiongroup==1.2.2
-e git+https://github.com/python-useful-helpers/exec-helpers.git@63166d1ac340be47d64488a5b84a9d6fa317e8fe#egg=exec_helpers
iniconfig==2.1.0
Jinja2==3.1.6
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
paramiko==3.5.1
pluggy==1.5.0
pycparser==2.22
PyNaCl==1.5.0
pytest==8.3.5
pytest-cov==6.0.0
pytest-html==4.1.1
pytest-metadata==3.1.1
PyYAML==6.0.2
six==1.17.0
tenacity==9.0.0
threaded==4.2.0
tomli==2.2.1
| name: exec-helpers
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- advanced-descriptors==4.0.3
- bcrypt==4.3.0
- cffi==1.17.1
- coverage==7.8.0
- cryptography==44.0.2
- exceptiongroup==1.2.2
- exec-helpers==1.2.1
- iniconfig==2.1.0
- jinja2==3.1.6
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- paramiko==3.5.1
- pluggy==1.5.0
- pycparser==2.22
- pynacl==1.5.0
- pytest==8.3.5
- pytest-cov==6.0.0
- pytest-html==4.1.1
- pytest-metadata==3.1.1
- pyyaml==6.0.2
- six==1.17.0
- tenacity==9.0.0
- threaded==4.2.0
- tomli==2.2.1
prefix: /opt/conda/envs/exec-helpers
| [
"test/test_ssh_client.py::TestExecute::test_001_execute_async",
"test/test_ssh_client.py::TestExecute::test_002_execute_async_pty",
"test/test_ssh_client.py::TestExecute::test_004_execute_async_sudo",
"test/test_ssh_client.py::TestExecute::test_005_execute_async_with_sudo_enforce",
"test/test_ssh_client.py::TestExecute::test_006_execute_async_with_no_sudo_enforce",
"test/test_ssh_client.py::TestExecute::test_007_execute_async_with_sudo_none_enforce",
"test/test_ssh_client.py::TestExecute::test_008_execute_async_sudo_password",
"test/test_ssh_client.py::TestExecute::test_009_execute_async_verbose",
"test/test_ssh_client.py::TestExecute::test_010_execute_async_mask_command",
"test/test_ssh_client.py::TestExecute::test_014_check_stdin_closed",
"test/test_ssh_client.py::TestExecute::test_019_execute",
"test/test_ssh_client.py::TestExecute::test_020_execute_verbose",
"test/test_ssh_client.py::TestExecute::test_021_execute_no_stdout",
"test/test_ssh_client.py::TestExecute::test_022_execute_no_stderr",
"test/test_ssh_client.py::TestExecute::test_023_execute_no_stdout_stderr",
"test/test_ssh_client.py::TestExecute::test_024_execute_timeout",
"test/test_ssh_client.py::TestExecute::test_026_execute_mask_command",
"test/test_ssh_client_init.py::TestSSHClientInit::test_013_init_clear_failed",
"test/test_ssh_client_init.py::TestSSHClientInit::test_021_init_no_sftp",
"test/test_ssh_client_init.py::TestSSHClientInit::test_022_init_sftp_repair",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_001_call",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_002_call_verbose",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_005_execute_no_stdout",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_006_execute_no_stderr",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_007_execute_no_stdout_stderr",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_008_execute_mask_global",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_009_execute_mask_local"
] | [] | [
"test/test_ssh_client.py::TestExecute::test_003_execute_async_no_stdout_stderr",
"test/test_ssh_client.py::TestExecute::test_011_check_stdin_str",
"test/test_ssh_client.py::TestExecute::test_012_check_stdin_bytes",
"test/test_ssh_client.py::TestExecute::test_013_check_stdin_bytearray",
"test/test_ssh_client.py::TestExecute::test_015_keepalive",
"test/test_ssh_client.py::TestExecute::test_016_no_keepalive",
"test/test_ssh_client.py::TestExecute::test_017_keepalive_enforced",
"test/test_ssh_client.py::TestExecute::test_018_no_keepalive_enforced",
"test/test_ssh_client.py::TestExecute::test_025_execute_timeout_fail",
"test/test_ssh_client.py::TestExecute::test_027_execute_together",
"test/test_ssh_client.py::TestExecute::test_028_execute_together_exceptions",
"test/test_ssh_client.py::TestExecute::test_029_check_call",
"test/test_ssh_client.py::TestExecute::test_030_check_call_expected",
"test/test_ssh_client.py::TestExecute::test_031_check_stderr",
"test/test_ssh_client.py::TestExecuteThrowHost::test_01_execute_through_host_no_creds",
"test/test_ssh_client.py::TestExecuteThrowHost::test_02_execute_through_host_auth",
"test/test_ssh_client.py::TestExecuteThrowHost::test_03_execute_through_host_get_pty",
"test/test_ssh_client.py::TestSftp::test_download",
"test/test_ssh_client.py::TestSftp::test_exists",
"test/test_ssh_client.py::TestSftp::test_isdir",
"test/test_ssh_client.py::TestSftp::test_isfile",
"test/test_ssh_client.py::TestSftp::test_mkdir",
"test/test_ssh_client.py::TestSftp::test_open",
"test/test_ssh_client.py::TestSftp::test_rm_rf",
"test/test_ssh_client.py::TestSftp::test_stat",
"test/test_ssh_client.py::TestSftp::test_upload_dir",
"test/test_ssh_client.py::TestSftp::test_upload_file",
"test/test_ssh_client_init.py::TestSSHClientInit::test_001_init_host",
"test/test_ssh_client_init.py::TestSSHClientInit::test_002_init_alternate_port",
"test/test_ssh_client_init.py::TestSSHClientInit::test_003_init_username",
"test/test_ssh_client_init.py::TestSSHClientInit::test_004_init_username_password",
"test/test_ssh_client_init.py::TestSSHClientInit::test_005_init_username_password_empty_keys",
"test/test_ssh_client_init.py::TestSSHClientInit::test_006_init_username_single_key",
"test/test_ssh_client_init.py::TestSSHClientInit::test_007_init_username_password_single_key",
"test/test_ssh_client_init.py::TestSSHClientInit::test_008_init_username_multiple_keys",
"test/test_ssh_client_init.py::TestSSHClientInit::test_009_init_username_password_multiple_keys",
"test/test_ssh_client_init.py::TestSSHClientInit::test_010_init_auth",
"test/test_ssh_client_init.py::TestSSHClientInit::test_011_init_auth_break",
"test/test_ssh_client_init.py::TestSSHClientInit::test_012_init_context",
"test/test_ssh_client_init.py::TestSSHClientInit::test_014_init_reconnect",
"test/test_ssh_client_init.py::TestSSHClientInit::test_015_init_password_required",
"test/test_ssh_client_init.py::TestSSHClientInit::test_016_init_password_broken",
"test/test_ssh_client_init.py::TestSSHClientInit::test_017_init_auth_impossible_password",
"test/test_ssh_client_init.py::TestSSHClientInit::test_018_init_auth_impossible_key",
"test/test_ssh_client_init.py::TestSSHClientInit::test_019_init_auth_pass_no_key",
"test/test_ssh_client_init.py::TestSSHClientInit::test_020_init_auth_brute_impossible",
"test/test_ssh_client_init.py::TestSSHClientInit::test_023_init_memorize",
"test/test_ssh_client_init.py::TestSSHClientInit::test_024_init_memorize_close_unused",
"test/test_ssh_client_init.py::TestSSHClientInit::test_025_init_memorize_reconnect",
"test/test_sshauth.py::TestSSHAuth::test_equality_copy",
"test/test_sshauth.py::TestSSHAuth::test_init_username_key",
"test/test_sshauth.py::TestSSHAuth::test_init_username_only",
"test/test_sshauth.py::TestSSHAuth::test_init_username_password",
"test/test_sshauth.py::TestSSHAuth::test_init_username_password_key",
"test/test_sshauth.py::TestSSHAuth::test_init_username_password_key_keys",
"test/test_sshauth.py::TestSSHAuth::test_init_username_password_keys",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_003_context_manager",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_004_check_stdin_str",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_004_execute_timeout_fail",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_005_check_stdin_bytes",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_006_check_stdin_bytearray",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_007_check_stdin_fail_broken_pipe",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_008_check_stdin_fail_closed_win",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_009_check_stdin_fail_write",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_010_check_stdin_fail_close_pipe",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_011_check_stdin_fail_close_pipe_win",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_012_check_stdin_fail_close",
"test/test_subprocess_runner.py::TestSubprocessRunner::test_013_execute_timeout_done",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_001_check_call",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_002_check_call_expected",
"test/test_subprocess_runner.py::TestSubprocessRunnerHelpers::test_003_check_stderr"
] | [] | Apache License 2.0 | 2,515 | 1,774 | [
"exec_helpers/_api.py",
"exec_helpers/_log_templates.py",
"exec_helpers/_ssh_client_base.py",
"exec_helpers/ssh_auth.py"
] |
|
ofek__pypinfo-49 | 72ea0a3e5669757e3c625ea2f1f0e3463d11db86 | 2018-05-15 15:00:40 | 2a0628e63b50def718228a6b5b87a0e83b7cbf01 | hugovk: Passing CI build: https://travis-ci.org/hugovk/pypinfo/builds/379256544
ofek: Thanks so much! | diff --git a/pypinfo/core.py b/pypinfo/core.py
index f1ba663..5c633a4 100644
--- a/pypinfo/core.py
+++ b/pypinfo/core.py
@@ -12,13 +12,17 @@ FROM = """\
FROM
TABLE_DATE_RANGE(
[the-psf:pypi.downloads],
- DATE_ADD(CURRENT_TIMESTAMP(), {}, "day"),
- DATE_ADD(CURRENT_TIMESTAMP(), {}, "day")
+ {},
+ {}
)
"""
+DATE_ADD = 'DATE_ADD(CURRENT_TIMESTAMP(), {}, "day")'
+START_TIMESTAMP = 'TIMESTAMP("{} 00:00:00")'
+END_TIMESTAMP = 'TIMESTAMP("{} 23:59:59")'
START_DATE = '-31'
END_DATE = '-1'
DEFAULT_LIMIT = '10'
+YYYY_MM_DD = re.compile("^[0-9]{4}-[01][0-9]-[0-3][0-9]$")
def create_config():
@@ -42,6 +46,28 @@ def create_client(creds_file=None):
return Client.from_service_account_json(creds_file, project=project)
+def validate_date(date):
+ valid = False
+ try:
+ if int(date) < 0:
+ valid = True
+ except ValueError:
+ if YYYY_MM_DD.match(date):
+ valid = True
+
+ if not valid:
+ raise ValueError('Dates must be negative integers or YYYY-MM-DD in the past.')
+ return valid
+
+
+def format_date(date, timestamp_format):
+ try:
+ date = DATE_ADD.format(int(date))
+ except ValueError:
+ date = timestamp_format.format(date)
+ return date
+
+
def build_query(project, all_fields, start_date=None, end_date=None,
days=None, limit=None, where=None, order=None, pip=None):
project = normalize(project)
@@ -53,11 +79,18 @@ def build_query(project, all_fields, start_date=None, end_date=None,
if days:
start_date = str(int(end_date) - int(days))
- if int(start_date) > 0 or int(end_date) > 0:
- raise ValueError('Dates must be in the past (negative).')
+ validate_date(start_date)
+ validate_date(end_date)
+
+ try:
+ if int(start_date) >= int(end_date):
+ raise ValueError('End date must be greater than start date.')
+ except ValueError:
+ # Not integers, must be yyyy-mm-dd
+ pass
- if int(start_date) >= int(end_date):
- raise ValueError('End date must be greater than start date.')
+ start_date = format_date(start_date, START_TIMESTAMP)
+ end_date = format_date(end_date, END_TIMESTAMP)
fields = []
used_fields = set()
| Allow YYYY-MM-DD dates in --start-date and --end-date
It'd be handy to be able to use `YYYY-MM-DD` dates as the start and end date. For example:
```console
$ pypinfo --start-date 2018-01-01 --end-date 2018-01-31 pillow pyversion
```
Rather than having to work it out:
```console
$ pypinfo --start-date -43 --end-date -14 pillow pyversion
```
It wouldn't necessarily have to reuse `--start-date` and `--end-date`, but that's probably clearest and easiest (if not negative integer, it's a date).
What do you think?
| ofek/pypinfo | diff --git a/tests/test_core.py b/tests/test_core.py
index 0ec1b9a..121b80b 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -1,3 +1,6 @@
+import copy
+import pytest
+
from pypinfo import core
ROWS = [
@@ -16,7 +19,7 @@ ROWS = [
def test_tabulate_default():
# Arrange
- rows = list(ROWS)
+ rows = copy.deepcopy(ROWS)
expected = """\
| python_version | percent | download_count |
| -------------- | ------- | -------------- |
@@ -40,7 +43,7 @@ def test_tabulate_default():
def test_tabulate_markdown():
# Arrange
- rows = list(ROWS)
+ rows = copy.deepcopy(ROWS)
expected = """\
| python_version | percent | download_count |
| -------------- | ------: | -------------: |
@@ -60,3 +63,50 @@ def test_tabulate_markdown():
# Assert
assert tabulated == expected
+
+
+def test_validate_date_negative_number():
+ # Act
+ valid = core.validate_date("-1")
+
+ # Assert
+ assert valid
+
+
+def test_validate_date_positive_number():
+ # Act / Assert
+ with pytest.raises(ValueError):
+ core.validate_date("1")
+
+
+def test_validate_date_yyyy_mm_dd():
+ # Act
+ valid = core.validate_date("2018-05-15")
+
+ # Assert
+ assert valid
+
+
+def test_validate_date_other_string():
+ # Act / Assert
+ with pytest.raises(ValueError):
+ core.validate_date("somthing invalid")
+
+
+def test_format_date_negative_number():
+ # Arrange
+ dummy_format = "dummy format {}"
+
+ # Act
+ date = core.format_date("-1", dummy_format)
+
+ # Assert
+ assert date == 'DATE_ADD(CURRENT_TIMESTAMP(), -1, "day")'
+
+
+def test_format_date_yyy_mm_dd():
+ # Act
+ date = core.format_date("2018-05-15", core.START_TIMESTAMP)
+
+ # Assert
+ assert date == 'TIMESTAMP("2018-05-15 00:00:00")'
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | 14.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | appdirs==1.4.4
binary==1.0.1
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
google-api-core==2.24.2
google-auth==2.38.0
google-cloud-bigquery==3.31.0
google-cloud-core==2.4.3
google-crc32c==1.7.1
google-resumable-media==2.7.2
googleapis-common-protos==1.69.2
grpcio==1.71.0
grpcio-status==1.71.0
idna==3.10
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
proto-plus==1.26.1
protobuf==5.29.4
pyasn1==0.6.1
pyasn1_modules==0.4.2
-e git+https://github.com/ofek/pypinfo.git@72ea0a3e5669757e3c625ea2f1f0e3463d11db86#egg=pypinfo
pytest @ file:///croot/pytest_1738938843180/work
python-dateutil==2.9.0.post0
requests==2.32.3
rsa==4.9
six==1.17.0
tinydb==4.8.2
tinyrecord==0.2.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
urllib3==2.3.0
| name: pypinfo
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- appdirs==1.4.4
- binary==1.0.1
- cachetools==5.5.2
- certifi==2025.1.31
- charset-normalizer==3.4.1
- click==8.1.8
- google-api-core==2.24.2
- google-auth==2.38.0
- google-cloud-bigquery==3.31.0
- google-cloud-core==2.4.3
- google-crc32c==1.7.1
- google-resumable-media==2.7.2
- googleapis-common-protos==1.69.2
- grpcio==1.71.0
- grpcio-status==1.71.0
- idna==3.10
- proto-plus==1.26.1
- protobuf==5.29.4
- pyasn1==0.6.1
- pyasn1-modules==0.4.2
- python-dateutil==2.9.0.post0
- requests==2.32.3
- rsa==4.9
- six==1.17.0
- tinydb==4.8.2
- tinyrecord==0.2.0
- urllib3==2.3.0
prefix: /opt/conda/envs/pypinfo
| [
"tests/test_core.py::test_validate_date_negative_number",
"tests/test_core.py::test_validate_date_positive_number",
"tests/test_core.py::test_validate_date_yyyy_mm_dd",
"tests/test_core.py::test_validate_date_other_string",
"tests/test_core.py::test_format_date_negative_number",
"tests/test_core.py::test_format_date_yyy_mm_dd"
] | [] | [
"tests/test_core.py::test_tabulate_default",
"tests/test_core.py::test_tabulate_markdown"
] | [] | MIT License | 2,525 | 649 | [
"pypinfo/core.py"
] |
streamlink__streamlink-1660 | e2a55461decc6856912325e8103cefb359027811 | 2018-05-16 14:48:22 | 060d38d3f0acc2c4f3b463ea988361622a9b6544 | codecov[bot]: # [Codecov](https://codecov.io/gh/streamlink/streamlink/pull/1660?src=pr&el=h1) Report
> Merging [#1660](https://codecov.io/gh/streamlink/streamlink/pull/1660?src=pr&el=desc) into [master](https://codecov.io/gh/streamlink/streamlink/commit/e2a55461decc6856912325e8103cefb359027811?src=pr&el=desc) will **decrease** coverage by `0.17%`.
> The diff coverage is `100%`.
```diff
@@ Coverage Diff @@
## master #1660 +/- ##
==========================================
- Coverage 33.17% 32.99% -0.18%
==========================================
Files 229 229
Lines 12898 12898
==========================================
- Hits 4279 4256 -23
- Misses 8619 8642 +23
```
karlo2105: Thanks.
gravyboat: Nice, thanks @beardypig. | diff --git a/src/streamlink/plugins/tf1.py b/src/streamlink/plugins/tf1.py
index 189f124c..88b5e585 100644
--- a/src/streamlink/plugins/tf1.py
+++ b/src/streamlink/plugins/tf1.py
@@ -9,13 +9,20 @@ from streamlink.stream import HLSStream
class TF1(Plugin):
- url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/(tf1|tmc|tfx|tf1-series-films)/direct|(lci).fr/direct)/?")
+ url_re = re.compile(r"https?://(?:www\.)?(?:tf1\.fr/([\w-]+)/direct|(lci).fr/direct)/?")
embed_url = "http://www.wat.tv/embedframe/live{0}"
embed_re = re.compile(r"urlLive.*?:.*?\"(http.*?)\"", re.MULTILINE)
api_url = "http://www.wat.tv/get/{0}/591997"
swf_url = "http://www.wat.tv/images/v70/PlayerLite.swf"
- hds_channel_remap = {"tf1": "androidliveconnect", "lci": "androidlivelci", "tfx" : "nt1live", "tf1-series-films" : "hd1live" }
- hls_channel_remap = {"lci": "LCI", "tf1": "V4", "tfx" : "nt1", "tf1-series-films" : "hd1" }
+ hds_channel_remap = {"tf1": "androidliveconnect",
+ "lci": "androidlivelci",
+ "tfx": "nt1live",
+ "hd1": "hd1live", # renamed to tfx
+ "tf1-series-films": "hd1live"}
+ hls_channel_remap = {"lci": "LCI",
+ "tf1": "V4",
+ "tfx": "nt1",
+ "tf1-series-films": "hd1"}
@classmethod
def can_handle_url(cls, url):
@@ -23,6 +30,7 @@ class TF1(Plugin):
def _get_hds_streams(self, channel):
channel = self.hds_channel_remap.get(channel, "{0}live".format(channel))
+ self.logger.debug("Using HDS channel name: {0}".format(channel))
manifest_url = http.get(self.api_url.format(channel),
params={"getURL": 1},
headers={"User-Agent": useragents.FIREFOX}).text
| LCI not covered by TF1 home page anymore
### Checklist
- [ ] This is a bug report.
- [ ] This is a feature request.
- [x] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
I pointed out since a while that tf1 plugin doesn't cover anymore LCI from tf1.fr home website. It did before.
### Expected / Actual behavior
Here I point out result with older tf1 plugin :
```
streamlink "https://www.tf1.fr/lci/direct"
[cli][info] Found matching plugin tf1 for URL https://www.tf1.fr/lci/direct
error: Unable to open URL: http://lcilivhlshdslive-lh.akamaihd.net/z/lci_1@30158
5/manifest.f4m?hdnea=st=1526479986~exp=1526481786~acl=/*~hmac=207f41547435bb3422
e9f51af166cae855bdbb387ac875524827deb528999d9e (403 Client Error: Forbidden for
url: http://lcilivhlshdslive-lh.akamaihd.net/z/lci_1@301585/manifest.f4m?hdnea=s
t=1526479986~exp=1526481786~acl=/*~hmac=207f41547435bb3422e9f51af166cae855bdbb38
7ac875524827deb528999d9e&g=DSCLJVQYJHGR&hdcore=3.1.0)
```
The latest tf1 plugin gives such result :
```
streamlink "https://www.tf1.fr/lci/direct"
[cli][info] Found matching plugin resolve for URL https://www.tf1.fr/lci/direct
[plugin.resolve][info] Found iframes:
Traceback (most recent call last):
File "C:\Program Files\Python27\Scripts\streamlink-script.py", line 11, in <mo
dule>
load_entry_point('streamlink==0.12.1+8.ge2a5546', 'console_scripts', 'stream
link')()
File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", lin
e 1113, in main
handle_url()
File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", lin
e 505, in handle_url
streams = fetch_streams(plugin)
File "c:\program files\python27\lib\site-packages\streamlink_cli\main.py", lin
e 402, in fetch_streams
sorting_excludes=args.stream_sorting_excludes)
File "c:\program files\python27\lib\site-packages\streamlink\plugin\plugin.py"
, line 385, in get_streams
return self.streams(*args, **kwargs)
File "c:\program files\python27\lib\site-packages\streamlink\plugin\plugin.py"
, line 288, in streams
ostreams = self._get_streams()
File "c:\program files\python27\lib\site-packages\streamlink\plugins\resolve.p
y", line 480, in _get_streams
IndexError: list index out of range
```
### Reproduction steps / Explicit stream URLs to test
1. ` streamlink "https://www.tf1.fr/lci/direct"`
### Logs
```
streamlink -l debug
[cli][debug] OS: Windows 7
[cli][debug] Python: 2.7.13
[cli][debug] Streamlink: 0.12.1+8.ge2a5546
[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.46.0)
```
Both tests were made with the latest streamlink build, I just replaced newer tf1 plugin with older.
Thanks for up.
| streamlink/streamlink | diff --git a/tests/test_plugin_tf1.py b/tests/test_plugin_tf1.py
index 77afd8d8..f8e48790 100644
--- a/tests/test_plugin_tf1.py
+++ b/tests/test_plugin_tf1.py
@@ -12,11 +12,11 @@ class TestPluginTF1(unittest.TestCase):
self.assertTrue(TF1.can_handle_url("http://lci.fr/direct"))
self.assertTrue(TF1.can_handle_url("http://www.lci.fr/direct"))
self.assertTrue(TF1.can_handle_url("http://tf1.fr/tmc/direct"))
+ self.assertTrue(TF1.can_handle_url("http://tf1.fr/lci/direct"))
+ def test_can_handle_url_negative(self):
# shouldn't match
self.assertFalse(TF1.can_handle_url("http://tf1.fr/direct"))
-# self.assertFalse(TF1.can_handle_url("http://tf1.fr/nt1/direct")) NOTE : TF1 redirect old channel names to new ones (for now).
-# self.assertFalse(TF1.can_handle_url("http://tf1.fr/hd1/direct"))
self.assertFalse(TF1.can_handle_url("http://www.tf1.fr/direct"))
self.assertFalse(TF1.can_handle_url("http://www.tvcatchup.com/"))
self.assertFalse(TF1.can_handle_url("http://www.youtube.com/"))
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"codecov",
"coverage",
"mock",
"requests-mock",
"pynsist",
"unittest2"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"dev-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
iso-639==0.4.5
iso3166==2.1.1
Jinja2==3.1.6
linecache2==1.0.0
MarkupSafe==3.0.2
mock==5.2.0
packaging==24.2
pluggy==1.5.0
pycryptodome==3.22.0
pynsist==2.8
PySocks==1.7.1
pytest==8.3.5
pytest-cov==6.0.0
requests==2.32.3
requests-mock==1.12.1
requests_download==0.1.2
six==1.17.0
-e git+https://github.com/streamlink/streamlink.git@e2a55461decc6856912325e8103cefb359027811#egg=streamlink
tomli==2.2.1
traceback2==1.4.0
unittest2==1.1.0
urllib3==2.3.0
websocket-client==1.8.0
yarg==0.1.10
| name: streamlink
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- argparse==1.4.0
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- iso-639==0.4.5
- iso3166==2.1.1
- jinja2==3.1.6
- linecache2==1.0.0
- markupsafe==3.0.2
- mock==5.2.0
- packaging==24.2
- pluggy==1.5.0
- pycryptodome==3.22.0
- pynsist==2.8
- pysocks==1.7.1
- pytest==8.3.5
- pytest-cov==6.0.0
- requests==2.32.3
- requests-download==0.1.2
- requests-mock==1.12.1
- six==1.17.0
- tomli==2.2.1
- traceback2==1.4.0
- unittest2==1.1.0
- urllib3==2.3.0
- websocket-client==1.8.0
- yarg==0.1.10
prefix: /opt/conda/envs/streamlink
| [
"tests/test_plugin_tf1.py::TestPluginTF1::test_can_handle_url"
] | [] | [
"tests/test_plugin_tf1.py::TestPluginTF1::test_can_handle_url_negative"
] | [] | BSD 2-Clause "Simplified" License | 2,532 | 604 | [
"src/streamlink/plugins/tf1.py"
] |
Yelp__swagger_spec_validator-93 | 40e1cc926775777ff2d56e271fd61697c6235579 | 2018-05-16 17:24:26 | 40e1cc926775777ff2d56e271fd61697c6235579 | diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py
index fe17ded..77920c1 100644
--- a/swagger_spec_validator/validator20.py
+++ b/swagger_spec_validator/validator20.py
@@ -7,6 +7,7 @@ from __future__ import unicode_literals
import functools
import logging
import string
+from collections import defaultdict
from jsonschema.validators import Draft4Validator
from jsonschema.validators import RefResolver
@@ -196,6 +197,8 @@ def validate_apis(apis, deref):
:raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
:raises: :py:class:`jsonschema.exceptions.ValidationError`
"""
+ operation_tag_to_operation_id_set = defaultdict(set)
+
for api_name, api_body in iteritems(apis):
api_body = deref(api_body)
api_params = deref(api_body.get('parameters', []))
@@ -206,6 +209,20 @@ def validate_apis(apis, deref):
if oper_name == 'parameters' or oper_name.startswith('x-'):
continue
oper_body = deref(api_body[oper_name])
+ oper_tags = deref(oper_body.get('tags', [None]))
+
+ # Check that, if this operation has an operationId defined,
+ # no other operation with a same tag also has that
+ # operationId.
+ operation_id = oper_body.get('operationId')
+ if operation_id is not None:
+ for oper_tag in oper_tags:
+ if operation_id in operation_tag_to_operation_id_set[oper_tag]:
+ raise SwaggerValidationError(
+ "Duplicate operationId: {}".format(operation_id)
+ )
+ operation_tag_to_operation_id_set[oper_tag].add(operation_id)
+
oper_params = deref(oper_body.get('parameters', []))
validate_duplicate_param(oper_params, deref)
all_path_params = list(set(
| Validator does not check uniqueness of operation ids
According to the Swagger spec, the `operationId` of an operation object is:
> Unique string used to identify the operation. The id MUST be unique among all operations described in the API. Tools and libraries MAY use the operationId to uniquely identify an operation, therefore, it is recommended to follow common programming naming conventions.
The validator does not currently check that `operationId`s are unique across the API. This would be a helpful feature because some codegen tools fail if this constraint is not met. | Yelp/swagger_spec_validator | diff --git a/tests/validator20/validate_apis_test.py b/tests/validator20/validate_apis_test.py
index 56f1e14..d1198d6 100644
--- a/tests/validator20/validate_apis_test.py
+++ b/tests/validator20/validate_apis_test.py
@@ -152,3 +152,91 @@ def test_api_check_default_fails(partial_parameter_spec, validator, instance):
validation_error = excinfo.value.args[1]
assert validation_error.instance == instance
assert validation_error.validator == validator
+
+
[email protected](
+ 'apis',
+ [
+ {
+ '/api': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'responses': {},
+ },
+ 'post': {
+ 'operationId': 'duplicateOperationId',
+ 'responses': {},
+ },
+ },
+ },
+ {
+ '/api1': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'responses': {},
+ },
+ },
+ '/api2': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'responses': {},
+ },
+ },
+ },
+ {
+ '/api1': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'tags': ['tag1', 'tag2'],
+ 'responses': {},
+ },
+ },
+ '/api2': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'tags': ['tag1'],
+ 'responses': {},
+ },
+ },
+ },
+ ]
+)
+def test_duplicate_operationIds_fails(apis):
+ with pytest.raises(SwaggerValidationError) as excinfo:
+ validate_apis(apis, lambda x: x)
+
+ swagger_validation_error = excinfo.value
+ error_message = swagger_validation_error.args[0]
+
+ assert error_message == "Duplicate operationId: duplicateOperationId"
+
+
[email protected](
+ 'apis',
+ [
+ {
+ '/api1': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'tags': ['tag1'],
+ 'responses': {},
+ },
+ },
+ '/api2': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'tags': ['tag2'],
+ 'responses': {},
+ },
+ },
+ '/api3': {
+ 'get': {
+ 'operationId': 'duplicateOperationId',
+ 'responses': {},
+ },
+ },
+ },
+ ]
+)
+def test_duplicate_operationIds_succeeds_if_tags_differ(apis):
+ validate_apis(apis, lambda x: x)
diff --git a/tests/validator20/validate_rich_spec_test.py b/tests/validator20/validate_rich_spec_test.py
index 593b9c0..0f5df04 100644
--- a/tests/validator20/validate_rich_spec_test.py
+++ b/tests/validator20/validate_rich_spec_test.py
@@ -36,10 +36,11 @@ def test_failure_on_duplicate_operation_parameters(swagger_spec):
def test_failure_on_unresolvable_path_parameter(swagger_spec):
- swagger_spec['paths']['/pet/{foo}'] = swagger_spec['paths']['/pet']
+ swagger_spec['paths']['/pet/{petId}']['get']['parameters'] = []
+
with pytest.raises(SwaggerValidationError) as exc_info:
validate_spec(swagger_spec)
- assert "Path parameter 'foo' used is not documented on '/pet/{foo}'" in str(exc_info.value)
+ assert "Path parameter 'petId' used is not documented on '/pet/{petId}'" in str(exc_info.value)
def test_failure_on_path_parameter_used_but_not_defined(swagger_spec):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": null,
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
httpretty==1.1.4
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
-e git+https://github.com/Yelp/swagger_spec_validator.git@40e1cc926775777ff2d56e271fd61697c6235579#egg=swagger_spec_validator
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: swagger_spec_validator
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- httpretty==1.1.4
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/swagger_spec_validator
| [
"tests/validator20/validate_apis_test.py::test_duplicate_operationIds_fails[apis0]",
"tests/validator20/validate_apis_test.py::test_duplicate_operationIds_fails[apis1]",
"tests/validator20/validate_apis_test.py::test_duplicate_operationIds_fails[apis2]"
] | [
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec0]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec1]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec2]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec3]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec4]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec5]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec6]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec7]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec8]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec9]",
"tests/validator20/validate_apis_test.py::test_api_check_default_succeed[partial_parameter_spec10]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec0-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec1-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec2-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec3-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec4-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec5-type-wrong_type]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec6-type--1]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec7-minLength-short_string]",
"tests/validator20/validate_apis_test.py::test_api_check_default_fails[partial_parameter_spec8-type-not_a_number_or_boolean]",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_duplicate_api_parameters",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_duplicate_operation_parameters",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_unresolvable_path_parameter",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_path_parameter_used_but_not_defined",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_unresolvable_ref_of_props_required_list"
] | [
"tests/validator20/validate_apis_test.py::test_api_level_params_ok",
"tests/validator20/validate_apis_test.py::test_api_level_x_hyphen_ok",
"tests/validator20/validate_apis_test.py::test_duplicate_operationIds_succeeds_if_tags_differ[apis0]",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_unresolvable_model_reference_from_model",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_unresolvable_model_reference_from_param",
"tests/validator20/validate_rich_spec_test.py::test_failure_on_unresolvable_model_reference_from_resp"
] | [] | Apache License 2.0 | 2,534 | 446 | [
"swagger_spec_validator/validator20.py"
] |
|
algoo__hapic-51 | 730d2bee7907ae8f68de012c1cfd3e059840ae9e | 2018-05-18 14:02:01 | d72385ed5e1321d2216f42f6d6267e30f5dab28a | diff --git a/hapic/context.py b/hapic/context.py
index 97aa0c4..7b5d9b9 100644
--- a/hapic/context.py
+++ b/hapic/context.py
@@ -135,6 +135,15 @@ class ContextInterface(object):
"""
raise NotImplementedError()
+ def is_debug(self) -> bool:
+ """
+ Method called to know if Hapic has been called in debug mode.
+ Debug mode provide some informations like debug trace and error
+ message in body when internal error happen.
+ :return: True if in debug mode
+ """
+ raise NotImplementedError()
+
class HandledException(object):
"""
diff --git a/hapic/decorator.py b/hapic/decorator.py
index 10c6036..8d7f284 100644
--- a/hapic/decorator.py
+++ b/hapic/decorator.py
@@ -420,7 +420,10 @@ class ExceptionHandlerControllerWrapper(ControllerWrapper):
func_kwargs,
)
except self.handled_exception_class as exc:
- response_content = self.error_builder.build_from_exception(exc)
+ response_content = self.error_builder.build_from_exception(
+ exc,
+ include_traceback=self.context.is_debug(),
+ )
# Check error format
dumped = self.error_builder.dump(response_content).data
diff --git a/hapic/error.py b/hapic/error.py
index 9157657..073b849 100644
--- a/hapic/error.py
+++ b/hapic/error.py
@@ -1,4 +1,6 @@
# -*- coding: utf-8 -*-
+import traceback
+
import marshmallow
from hapic.processor import ProcessValidationError
@@ -9,7 +11,11 @@ class ErrorBuilderInterface(marshmallow.Schema):
ErrorBuilder is a class who represent a Schema (marshmallow.Schema) and
can generate a response content from exception (build_from_exception)
"""
- def build_from_exception(self, exception: Exception) -> dict:
+ def build_from_exception(
+ self,
+ exception: Exception,
+ include_traceback: bool = False,
+ ) -> dict:
"""
Build the error response content from given exception
:param exception: Original exception who invoke this method
@@ -34,14 +40,28 @@ class DefaultErrorBuilder(ErrorBuilderInterface):
details = marshmallow.fields.Dict(required=False, missing={})
code = marshmallow.fields.Raw(missing=None)
- def build_from_exception(self, exception: Exception) -> dict:
+ def build_from_exception(
+ self,
+ exception: Exception,
+ include_traceback: bool = False,
+ ) -> dict:
"""
See hapic.error.ErrorBuilderInterface#build_from_exception docstring
"""
# TODO: "error_detail" attribute name should be configurable
+ message = str(exception)
+ if not message:
+ message = type(exception).__name__
+
+ details = {
+ 'error_detail': getattr(exception, 'error_detail', {}),
+ }
+ if include_traceback:
+ details['traceback'] = traceback.format_exc()
+
return {
- 'message': str(exception),
- 'details': getattr(exception, 'error_detail', {}),
+ 'message': message,
+ 'details': details,
'code': None,
}
diff --git a/hapic/ext/bottle/context.py b/hapic/ext/bottle/context.py
index c5090b8..ba8d75a 100644
--- a/hapic/ext/bottle/context.py
+++ b/hapic/ext/bottle/context.py
@@ -33,12 +33,14 @@ class BottleContext(BaseContext):
self,
app: bottle.Bottle,
default_error_builder: ErrorBuilderInterface=None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self._exceptions_handler_installed = False
self.app = app
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
path_parameters = dict(bottle.request.url_args)
@@ -164,3 +166,6 @@ class BottleContext(BaseContext):
See hapic.context.BaseContext#_get_handled_exception_class_and_http_codes # nopep8
"""
return self._handled_exceptions
+
+ def is_debug(self) -> bool:
+ return self.debug
diff --git a/hapic/ext/flask/context.py b/hapic/ext/flask/context.py
index 0908dc2..b548d11 100644
--- a/hapic/ext/flask/context.py
+++ b/hapic/ext/flask/context.py
@@ -32,11 +32,13 @@ class FlaskContext(BaseContext):
self,
app: Flask,
default_error_builder: ErrorBuilderInterface=None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self.app = app
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
from flask import request
@@ -165,3 +167,6 @@ class FlaskContext(BaseContext):
http_code: int,
) -> None:
raise NotImplementedError('TODO')
+
+ def is_debug(self) -> bool:
+ return self.debug
diff --git a/hapic/ext/pyramid/context.py b/hapic/ext/pyramid/context.py
index d39b615..6fcde49 100644
--- a/hapic/ext/pyramid/context.py
+++ b/hapic/ext/pyramid/context.py
@@ -31,11 +31,13 @@ class PyramidContext(BaseContext):
self,
configurator: 'Configurator',
default_error_builder: ErrorBuilderInterface = None,
+ debug: bool = False,
):
self._handled_exceptions = [] # type: typing.List[HandledException] # nopep8
self.configurator = configurator
self.default_error_builder = \
default_error_builder or DefaultErrorBuilder() # FDV
+ self.debug = debug
def get_request_parameters(self, *args, **kwargs) -> RequestParameters:
req = args[-1] # TODO : Check
@@ -189,3 +191,6 @@ class PyramidContext(BaseContext):
http_code: int,
) -> None:
raise NotImplementedError('TODO')
+
+ def is_debug(self) -> bool:
+ return self.debug
| Error catching: erro details must be given by default
When error is catch and transformed to response, error information must be hidden by default. A parameter like "debug" should be able to return it. | algoo/hapic | diff --git a/tests/unit/test_decorator.py b/tests/unit/test_decorator.py
index e088a8a..08906a4 100644
--- a/tests/unit/test_decorator.py
+++ b/tests/unit/test_decorator.py
@@ -276,7 +276,7 @@ class TestExceptionHandlerControllerWrapper(Base):
response = func(42)
assert HTTPStatus.INTERNAL_SERVER_ERROR == response.status_code
assert {
- 'details': {},
+ 'details': {'error_detail': {}},
'message': 'We are testing',
'code': None,
} == json.loads(response.body)
@@ -305,7 +305,7 @@ class TestExceptionHandlerControllerWrapper(Base):
assert response.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
assert {
'message': 'We are testing',
- 'details': {'foo': 'bar'},
+ 'details': {'error_detail': {'foo': 'bar'}},
'code': None,
} == json.loads(response.body)
@@ -314,7 +314,11 @@ class TestExceptionHandlerControllerWrapper(Base):
pass
class MyErrorBuilder(DefaultErrorBuilder):
- def build_from_exception(self, exception: Exception) -> dict:
+ def build_from_exception(
+ self,
+ exception: Exception,
+ include_traceback: bool = False,
+ ) -> dict:
# this is not matching with DefaultErrorBuilder schema
return {}
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 1
},
"num_modified_files": 6
} | 0.38 | {
"env_vars": null,
"env_yml_path": [],
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [],
"python": "3.6",
"reqs_path": [
"requirements/base.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.13.2
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
coverage==6.2
dataclasses==0.8
Flask==2.0.3
-e git+https://github.com/algoo/hapic.git@730d2bee7907ae8f68de012c1cfd3e059840ae9e#egg=hapic
hapic-apispec==0.37.0
hupper==1.10.3
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
MarkupSafe==2.0.1
marshmallow==2.21.0
multidict==5.2.0
packaging==21.3
PasteDeploy==2.1.1
plaster==1.0
plaster-pastedeploy==0.7
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyramid==2.0.2
pytest==7.0.1
pytest-cov==4.0.0
PyYAML==6.0.1
requests==2.27.1
soupsieve==2.3.2.post1
tomli==1.2.3
translationstring==1.4
typing_extensions==4.1.1
urllib3==1.26.20
venusian==3.0.0
waitress==2.0.0
WebOb==1.8.9
WebTest==3.0.0
Werkzeug==2.0.3
zipp==3.6.0
zope.deprecation==4.4.0
zope.interface==5.5.2
| name: hapic
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.13.2
- charset-normalizer==2.0.12
- click==8.0.4
- coverage==6.2
- dataclasses==0.8
- flask==2.0.3
- hapic-apispec==0.37.0
- hupper==1.10.3
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- markupsafe==2.0.1
- marshmallow==2.21.0
- multidict==5.2.0
- packaging==21.3
- pastedeploy==2.1.1
- plaster==1.0
- plaster-pastedeploy==0.7
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyramid==2.0.2
- pytest==7.0.1
- pytest-cov==4.0.0
- pyyaml==6.0.1
- requests==2.27.1
- soupsieve==2.3.2.post1
- tomli==1.2.3
- translationstring==1.4
- typing-extensions==4.1.1
- urllib3==1.26.20
- venusian==3.0.0
- waitress==2.0.0
- webob==1.8.9
- webtest==3.0.0
- werkzeug==2.0.3
- zipp==3.6.0
- zope-deprecation==4.4.0
- zope-interface==5.5.2
prefix: /opt/conda/envs/hapic
| [
"tests/unit/test_decorator.py::TestExceptionHandlerControllerWrapper::test_unit__exception_handled__ok__nominal_case",
"tests/unit/test_decorator.py::TestExceptionHandlerControllerWrapper::test_unit__exception_handled__ok__exception_error_dict"
] | [] | [
"tests/unit/test_decorator.py::TestControllerWrapper::test_unit__base_controller_wrapper__ok__no_behaviour",
"tests/unit/test_decorator.py::TestControllerWrapper::test_unit__base_controller__ok__replaced_response",
"tests/unit/test_decorator.py::TestControllerWrapper::test_unit__controller_wrapper__ok__overload_input",
"tests/unit/test_decorator.py::TestInputControllerWrapper::test_unit__input_data_wrapping__ok__nominal_case",
"tests/unit/test_decorator.py::TestInputControllerWrapper::test_unit__multi_query_param_values__ok__use_as_list",
"tests/unit/test_decorator.py::TestInputControllerWrapper::test_unit__multi_query_param_values__ok__without_as_list",
"tests/unit/test_decorator.py::TestOutputControllerWrapper::test_unit__output_data_wrapping__ok__nominal_case",
"tests/unit/test_decorator.py::TestOutputControllerWrapper::test_unit__output_data_wrapping__fail__error_response",
"tests/unit/test_decorator.py::TestExceptionHandlerControllerWrapper::test_unit__exception_handler__error__error_content_malformed"
] | [] | MIT License | 2,543 | 1,577 | [
"hapic/context.py",
"hapic/decorator.py",
"hapic/error.py",
"hapic/ext/bottle/context.py",
"hapic/ext/flask/context.py",
"hapic/ext/pyramid/context.py"
] |
|
CORE-GATECH-GROUP__serpent-tools-154 | 911894d67eb9677c4430a1aee91e9d1461ffc44b | 2018-05-18 16:25:11 | 7c7da6012f509a2e71c3076ab510718585f75b11 | diff --git a/serpentTools/objects/containers.py b/serpentTools/objects/containers.py
index 8c30e1b..bcc9103 100644
--- a/serpentTools/objects/containers.py
+++ b/serpentTools/objects/containers.py
@@ -35,8 +35,8 @@ for xsSpectrum, xsType in product({'INF', 'B1'},
for xx in range(SCATTER_ORDERS)})
HOMOG_VAR_TO_ATTR = {
- 'MICRO_E': 'microGroups', 'MICRO_NG': '_numMicroGroups',
- 'MACRO_E': 'groups', 'MACRO_NG': '_numGroups'}
+ 'MICRO_E': 'microGroups', 'MICRO_NG': 'numMicroGroups',
+ 'MACRO_E': 'groups', 'MACRO_NG': 'numGroups'}
__all__ = ('DET_COLS', 'HomogUniv', 'BranchContainer', 'Detector',
'DetectorBase', 'SCATTER_MATS', 'SCATTER_ORDERS')
@@ -147,12 +147,22 @@ class HomogUniv(NamedObject):
self._numGroups = self.groups.size - 1
return self._numGroups
+ @numGroups.setter
+ def numGroups(self, value):
+ value = value if isinstance(value, int) else int(value)
+ self._numGroups = value
+
@property
def numMicroGroups(self):
if self._numMicroGroups is None and self.microGroups is not None:
self._numMicroGroups = self.microGroups.size - 1
return self._numMicroGroups
+ @numMicroGroups.setter
+ def numMicroGroups(self, value):
+ value = value if isinstance(value, int) else int(value)
+ self._numMicroGroups = value
+
def __str__(self):
extras = []
if self.bu is not None:
| [BUG] number of groups stored as a float; causes reshape scatter matrices to fail
## Summary of issue
The `addData` routine stores the number of energy groups as a float. This causes numpy to fail during the reshaping of scattering matrices.
## Code for reproducing the issue
```
import serpentTools
from serpentTools.settings import rc
rc['xs.reshapeScatter'] = True
r = serpentTools.read('bwr_res.m')
```
## Actual outcome including console output and error traceback if applicable
```
~/.local/lib/python3.5/site-packages/serpentTools-0.4.0+9.g277cb89-py3.5.egg/serpentTools/objects/containers.py in addData(self, variableName, variableValue, uncertainty)
200 'should be boolean.'.format(type(uncertainty)))
201
--> 202 value = self._cleanData(variableName, variableValue)
203 if variableName in HOMOG_VAR_TO_ATTR:
204 value = value if variableValue.size > 1 else value[0]
~/.local/lib/python3.5/site-packages/serpentTools-0.4.0+9.g277cb89-py3.5.egg/serpentTools/objects/containers.py in _cleanData(self, name, value)
233 .format(name))
234 else:
--> 235 value = value.reshape(ng, ng)
236 return value
237
TypeError: 'numpy.float64' object cannot be interpreted as an integer
```
## Expected outcome
No error and scattering matrices are reshaped properly
## Versions
* Version from ``serpentTools.__version__`` `0.4.0+9.g277cb89`
* Python version - ``python --version`` `3.5`
* IPython or Jupyter version if applicable - `ipython 6.2.1`
| CORE-GATECH-GROUP/serpent-tools | diff --git a/serpentTools/tests/test_container.py b/serpentTools/tests/test_container.py
index 721dd1d..ded8988 100644
--- a/serpentTools/tests/test_container.py
+++ b/serpentTools/tests/test_container.py
@@ -4,7 +4,7 @@ import unittest
from itertools import product
from six import iteritems
-from numpy import array, arange, ndarray
+from numpy import array, arange, ndarray, float64
from numpy.testing import assert_array_equal
from serpentTools.settings import rc
@@ -171,6 +171,37 @@ class UnivTruthTester(unittest.TestCase):
self.assertTrue(univ.hasData, msg=msg)
+class HomogUnivIntGroupsTester(unittest.TestCase):
+ """Class that ensures number of groups is stored as ints."""
+
+ def setUp(self):
+ self.univ = HomogUniv('intGroups', 0, 0, 0)
+ self.numGroups = 2
+ self.numMicroGroups = 4
+
+ def test_univGroupsFromFloats(self):
+ """Vefify integer groups are stored when passed as floats."""
+ self.setAs(float)
+ self._tester()
+
+ def test_univGroupsFromNPFloats(self):
+ """Vefify integer groups are stored when passed as numpy floats."""
+ self.setAs(float64)
+ self._tester()
+
+ def _tester(self):
+ for attr in {'numGroups', 'numMicroGroups'}:
+ actual = getattr(self.univ, attr)
+ msg ='Attribute: {}'.format(attr)
+ self.assertIsInstance(actual, int, msg=msg)
+ expected = getattr(self, attr)
+ self.assertEqual(expected, actual, msg=msg)
+
+ def setAs(self, func):
+ """Set the number of groups to be as specific type."""
+ for attr in {'numGroups', 'numMicroGroups'}:
+ expected = getattr(self, attr)
+ setattr(self.univ, attr, func(expected))
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 1
} | 0.4 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "numpy>=1.11.1 matplotlib>=1.5.0 pyyaml>=3.08 scipy six",
"pip_packages": [
"numpy>=1.11.1",
"matplotlib>=1.5.0",
"pyyaml>=3.08",
"scipy",
"six",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
cycler @ file:///tmp/build/80754af9/cycler_1637851556182/work
importlib-metadata==4.8.3
iniconfig==1.1.1
kiwisolver @ file:///tmp/build/80754af9/kiwisolver_1612282412546/work
matplotlib @ file:///tmp/build/80754af9/matplotlib-suite_1613407855456/work
numpy @ file:///tmp/build/80754af9/numpy_and_numpy_base_1603483703303/work
olefile @ file:///Users/ktietz/demo/mc3/conda-bld/olefile_1629805411829/work
packaging==21.3
Pillow @ file:///tmp/build/80754af9/pillow_1625670622947/work
pluggy==1.0.0
py==1.11.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==7.0.1
python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
PyYAML==5.4.1
scipy @ file:///tmp/build/80754af9/scipy_1597686635649/work
-e git+https://github.com/CORE-GATECH-GROUP/serpent-tools.git@911894d67eb9677c4430a1aee91e9d1461ffc44b#egg=serpentTools
six @ file:///tmp/build/80754af9/six_1644875935023/work
tomli==1.2.3
tornado @ file:///tmp/build/80754af9/tornado_1606942266872/work
typing_extensions==4.1.1
zipp==3.6.0
| name: serpent-tools
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- blas=1.0=openblas
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- cycler=0.11.0=pyhd3eb1b0_0
- dbus=1.13.18=hb2f20db_0
- expat=2.6.4=h6a678d5_0
- fontconfig=2.14.1=h52c9d5c_1
- freetype=2.12.1=h4a9f257_0
- giflib=5.2.2=h5eee18b_0
- glib=2.69.1=h4ff587b_1
- gst-plugins-base=1.14.1=h6a678d5_1
- gstreamer=1.14.1=h5eee18b_1
- icu=58.2=he6710b0_3
- jpeg=9e=h5eee18b_3
- kiwisolver=1.3.1=py36h2531618_0
- lcms2=2.16=hb9589c4_0
- ld_impl_linux-64=2.40=h12ee557_0
- lerc=4.0.0=h6a678d5_0
- libdeflate=1.22=h5eee18b_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgfortran-ng=7.5.0=ha8ba4b0_17
- libgfortran4=7.5.0=ha8ba4b0_17
- libgomp=11.2.0=h1234567_1
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.39=h5eee18b_0
- libstdcxx-ng=11.2.0=h1234567_1
- libtiff=4.5.1=hffd6297_1
- libuuid=1.41.5=h5eee18b_0
- libwebp=1.2.4=h11a3e52_1
- libwebp-base=1.2.4=h5eee18b_1
- libxcb=1.15=h7f8727e_0
- libxml2=2.9.14=h74e7548_0
- lz4-c=1.9.4=h6a678d5_1
- matplotlib=3.3.4=py36h06a4308_0
- matplotlib-base=3.3.4=py36h62a2d02_0
- ncurses=6.4=h6a678d5_0
- numpy=1.19.2=py36h6163131_0
- numpy-base=1.19.2=py36h75fe3a5_0
- olefile=0.46=pyhd3eb1b0_0
- openssl=1.1.1w=h7f8727e_0
- pcre=8.45=h295c915_0
- pillow=8.3.1=py36h5aabda8_0
- pip=21.2.2=py36h06a4308_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pyqt=5.9.2=py36h05f1152_2
- python=3.6.13=h12debd9_1
- python-dateutil=2.8.2=pyhd3eb1b0_0
- pyyaml=5.4.1=py36h27cfd23_1
- qt=5.9.7=h5867ecd_1
- readline=8.2=h5eee18b_0
- scipy=1.5.2=py36habc2bb6_0
- setuptools=58.0.4=py36h06a4308_0
- sip=4.19.8=py36hf484d3e_0
- six=1.16.0=pyhd3eb1b0_1
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tornado=6.1=py36h27cfd23_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- yaml=0.2.5=h7b6447c_0
- zlib=1.2.13=h5eee18b_1
- zstd=1.5.6=hc292b87_0
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pytest==7.0.1
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/serpent-tools
| [
"serpentTools/tests/test_container.py::HomogUnivIntGroupsTester::test_univGroupsFromFloats",
"serpentTools/tests/test_container.py::HomogUnivIntGroupsTester::test_univGroupsFromNPFloats"
] | [] | [
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_attributes",
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_getB1Exp",
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_getB1Unc",
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_getBothInf",
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_getInfExp",
"serpentTools/tests/test_container.py::VectoredHomogUnivTester::test_getInfUnc",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_attributes",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_getB1Exp",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_getB1Unc",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_getBothInf",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_getInfExp",
"serpentTools/tests/test_container.py::ReshapedHomogUnivTester::test_getInfUnc",
"serpentTools/tests/test_container.py::UnivTruthTester::test_loadedUnivTrue"
] | [] | MIT License | 2,544 | 442 | [
"serpentTools/objects/containers.py"
] |
|
pypa__twine-369 | 34c08ef97d05d219ae018f041cd37e1d409b7a4d | 2018-05-19 18:01:32 | c977b44cf87e066125e9de496429f8b3f5c90bf4 | codecov[bot]: # [Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=h1) Report
> Merging [#369](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=desc) into [master](https://codecov.io/gh/pypa/twine/commit/34c08ef97d05d219ae018f041cd37e1d409b7a4d?src=pr&el=desc) will **decrease** coverage by `0.45%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #369 +/- ##
=========================================
- Coverage 74.55% 74.1% -0.46%
=========================================
Files 13 13
Lines 672 668 -4
Branches 101 100 -1
=========================================
- Hits 501 495 -6
- Misses 143 145 +2
Partials 28 28
```
| [Impacted Files](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [twine/utils.py](https://codecov.io/gh/pypa/twine/pull/369/diff?src=pr&el=tree#diff-dHdpbmUvdXRpbHMucHk=) | `82.25% <100%> (-2.12%)` | :arrow_down: |
| [twine/wininst.py](https://codecov.io/gh/pypa/twine/pull/369/diff?src=pr&el=tree#diff-dHdpbmUvd2luaW5zdC5weQ==) | `29.72% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=footer). Last update [34c08ef...6e1a1ea](https://codecov.io/gh/pypa/twine/pull/369?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
anlutro: I wonder if I could remove this line and just always configure `testpypi`, regardless of `index-servers`? I left it in to stay consistent with old behaviour. https://github.com/pypa/twine/pull/369/files#diff-547bab308763f89cacec226151fcbb80R83
anlutro: docs failed in Travis - unrelated, I guess?
theacodes: Yes, unrelated and up to me to fix. Can you get lint passing?
anlutro: Amended, lint should pass now. | diff --git a/twine/utils.py b/twine/utils.py
index d83e080..4feca1b 100644
--- a/twine/utils.py
+++ b/twine/utils.py
@@ -21,6 +21,7 @@ import getpass
import sys
import argparse
import warnings
+import collections
from requests.exceptions import HTTPError
@@ -48,68 +49,52 @@ TEST_REPOSITORY = "https://test.pypi.org/legacy/"
def get_config(path="~/.pypirc"):
+ # even if the config file does not exist, set up the parser
+ # variable to reduce the number of if/else statements
+ parser = configparser.RawConfigParser()
+
+ # this list will only be used if index-servers
+ # is not defined in the config file
+ index_servers = ["pypi", "testpypi"]
+
+ # default configuration for each repository
+ defaults = {"username": None, "password": None}
+
# Expand user strings in the path
path = os.path.expanduser(path)
- if not os.path.isfile(path):
- return {"pypi": {"repository": DEFAULT_REPOSITORY,
- "username": None,
- "password": None
- },
- "pypitest": {"repository": TEST_REPOSITORY,
- "username": None,
- "password": None
- },
- }
-
# Parse the rc file
- parser = configparser.RawConfigParser()
- parser.read(path)
-
- # Get a list of repositories from the config file
- # format: https://docs.python.org/3/distutils/packageindex.html#pypirc
- if (parser.has_section("distutils") and
- parser.has_option("distutils", "index-servers")):
- repositories = parser.get("distutils", "index-servers").split()
- elif parser.has_section("pypi"):
- # Special case: if the .pypirc file has a 'pypi' section,
- # even if there's no list of index servers,
- # be lenient and include that in our list of repositories.
- repositories = ['pypi']
- else:
- repositories = []
+ if os.path.isfile(path):
+ parser.read(path)
- config = {}
+ # Get a list of index_servers from the config file
+ # format: https://docs.python.org/3/distutils/packageindex.html#pypirc
+ if parser.has_option("distutils", "index-servers"):
+ index_servers = parser.get("distutils", "index-servers").split()
- defaults = {"username": None, "password": None}
- if parser.has_section("server-login"):
for key in ["username", "password"]:
if parser.has_option("server-login", key):
defaults[key] = parser.get("server-login", key)
- for repository in repositories:
- # Skip this repository if it doesn't exist in the config file
- if not parser.has_section(repository):
- continue
+ config = collections.defaultdict(lambda: defaults.copy())
- # Mandatory configuration and defaults
- config[repository] = {
- "repository": DEFAULT_REPOSITORY,
- "username": None,
- "password": None,
- }
+ # don't require users to manually configure URLs for these repositories
+ config["pypi"]["repository"] = DEFAULT_REPOSITORY
+ if "testpypi" in index_servers:
+ config["testpypi"]["repository"] = TEST_REPOSITORY
- # Optional configuration values
+ # optional configuration values for individual repositories
+ for repository in index_servers:
for key in [
"username", "repository", "password",
"ca_cert", "client_cert",
]:
if parser.has_option(repository, key):
config[repository][key] = parser.get(repository, key)
- elif defaults.get(key):
- config[repository][key] = defaults[key]
- return config
+ # convert the defaultdict to a regular dict at this point
+ # to prevent surprising behavior later on
+ return dict(config)
def get_repository_from_config(config_file, repository, repository_url=None):
| Twine should have a built-in alias for testpypi
Instead of needing to specify the full upload URL for Test PyPI we should always have an alias ready, for example:
```
twine upload --repository=testpypi dist/*
```
Should work even without a `-/.pypirc`. If `testpypi` is defined in `~/.pypic`, it should take precedence. | pypa/twine | diff --git a/tests/test_utils.py b/tests/test_utils.py
index 4cd45a6..4d60e04 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -72,6 +72,11 @@ def test_get_config_no_distutils(tmpdir):
"username": "testuser",
"password": "testpassword",
},
+ "testpypi": {
+ "repository": utils.TEST_REPOSITORY,
+ "username": None,
+ "password": None,
+ },
}
@@ -97,6 +102,18 @@ def test_get_config_no_section(tmpdir):
}
+def test_get_config_override_pypi_url(tmpdir):
+ pypirc = os.path.join(str(tmpdir), ".pypirc")
+
+ with open(pypirc, "w") as fp:
+ fp.write(textwrap.dedent("""
+ [pypi]
+ repository = http://pypiproxy
+ """))
+
+ assert utils.get_config(pypirc)['pypi']['repository'] == 'http://pypiproxy'
+
+
def test_get_config_missing(tmpdir):
pypirc = os.path.join(str(tmpdir), ".pypirc")
@@ -106,7 +123,7 @@ def test_get_config_missing(tmpdir):
"username": None,
"password": None,
},
- "pypitest": {
+ "testpypi": {
"repository": utils.TEST_REPOSITORY,
"username": None,
"password": None
@@ -143,8 +160,13 @@ def test_get_config_deprecated_pypirc():
assert utils.get_config(deprecated_pypirc_path) == {
"pypi": {
"repository": utils.DEFAULT_REPOSITORY,
- "username": 'testusername',
- "password": 'testpassword',
+ "username": "testusername",
+ "password": "testpassword",
+ },
+ "testpypi": {
+ "repository": utils.TEST_REPOSITORY,
+ "username": "testusername",
+ "password": "testpassword",
},
}
| {
"commit_name": "merge_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.11 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[keyring]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"docs/requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
bleach==4.1.0
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
colorama==0.4.5
cryptography==40.0.2
distlib==0.3.9
doc8==0.11.2
docutils==0.18.1
filelock==3.4.1
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.4.0
iniconfig==1.1.1
jeepney==0.7.1
Jinja2==3.0.3
keyring==23.4.1
MarkupSafe==2.0.1
packaging==21.3
pbr==6.1.1
pkginfo==1.10.0
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycparser==2.21
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytz==2025.2
readme-renderer==34.0
releases==2.1.1
requests==2.27.1
requests-toolbelt==1.0.0
restructuredtext-lint==1.4.0
rfc3986==1.5.0
SecretStorage==3.3.3
semantic-version==2.6.0
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-rtd-theme==2.0.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jquery==4.1
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
stevedore==3.5.2
toml==0.10.2
tomli==1.2.3
tox==3.28.0
tqdm==4.64.1
-e git+https://github.com/pypa/twine.git@34c08ef97d05d219ae018f041cd37e1d409b7a4d#egg=twine
typing_extensions==4.1.1
urllib3==1.26.20
virtualenv==20.17.1
webencodings==0.5.1
zipp==3.6.0
| name: twine
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- bleach==4.1.0
- cffi==1.15.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- cryptography==40.0.2
- distlib==0.3.9
- doc8==0.11.2
- docutils==0.18.1
- filelock==3.4.1
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.4.0
- iniconfig==1.1.1
- jeepney==0.7.1
- jinja2==3.0.3
- keyring==23.4.1
- markupsafe==2.0.1
- packaging==21.3
- pbr==6.1.1
- pkginfo==1.10.0
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycparser==2.21
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytz==2025.2
- readme-renderer==34.0
- releases==2.1.1
- requests==2.27.1
- requests-toolbelt==1.0.0
- restructuredtext-lint==1.4.0
- rfc3986==1.5.0
- secretstorage==3.3.3
- semantic-version==2.6.0
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-rtd-theme==2.0.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jquery==4.1
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stevedore==3.5.2
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- tqdm==4.64.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- virtualenv==20.17.1
- webencodings==0.5.1
- zipp==3.6.0
prefix: /opt/conda/envs/twine
| [
"tests/test_utils.py::test_get_config_no_distutils",
"tests/test_utils.py::test_get_config_missing",
"tests/test_utils.py::test_get_config_deprecated_pypirc"
] | [] | [
"tests/test_utils.py::test_get_config",
"tests/test_utils.py::test_get_config_no_section",
"tests/test_utils.py::test_get_config_override_pypi_url",
"tests/test_utils.py::test_get_repository_config_missing",
"tests/test_utils.py::test_get_userpass_value[cli-config0-key-<lambda>-cli]",
"tests/test_utils.py::test_get_userpass_value[None-config1-key-<lambda>-value]",
"tests/test_utils.py::test_get_userpass_value[None-config2-key-<lambda>-fallback]",
"tests/test_utils.py::test_default_to_environment_action[MY_PASSWORD-None-environ0-None]",
"tests/test_utils.py::test_default_to_environment_action[MY_PASSWORD-None-environ1-foo]",
"tests/test_utils.py::test_default_to_environment_action[URL-https://example.org-environ2-https://example.org]",
"tests/test_utils.py::test_default_to_environment_action[URL-https://example.org-environ3-https://pypi.org]",
"tests/test_utils.py::test_get_password_keyring_overrides_prompt",
"tests/test_utils.py::test_get_password_keyring_defers_to_prompt",
"tests/test_utils.py::test_get_password_keyring_missing_prompts",
"tests/test_utils.py::test_get_password_runtime_error_suppressed",
"tests/test_utils.py::test_no_positional_on_method",
"tests/test_utils.py::test_no_positional_on_function"
] | [] | Apache License 2.0 | 2,551 | 945 | [
"twine/utils.py"
] |
tornadoweb__tornado-2393 | eb487cac3d829292ecca6e5124b1da5ae6bba407 | 2018-05-19 23:59:17 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | diff --git a/tornado/autoreload.py b/tornado/autoreload.py
index 2f911270..7d69474a 100644
--- a/tornado/autoreload.py
+++ b/tornado/autoreload.py
@@ -107,6 +107,9 @@ _watched_files = set()
_reload_hooks = []
_reload_attempted = False
_io_loops = weakref.WeakKeyDictionary() # type: ignore
+_autoreload_is_main = False
+_original_argv = None
+_original_spec = None
def start(check_time=500):
@@ -214,11 +217,15 @@ def _reload():
# __spec__ is not available (Python < 3.4), check instead if
# sys.path[0] is an empty string and add the current directory to
# $PYTHONPATH.
- spec = getattr(sys.modules['__main__'], '__spec__', None)
- if spec:
- argv = ['-m', spec.name] + sys.argv[1:]
+ if _autoreload_is_main:
+ spec = _original_spec
+ argv = _original_argv
else:
+ spec = getattr(sys.modules['__main__'], '__spec__', None)
argv = sys.argv
+ if spec:
+ argv = ['-m', spec.name] + argv[1:]
+ else:
path_prefix = '.' + os.pathsep
if (sys.path[0] == '' and
not os.environ.get("PYTHONPATH", "").startswith(path_prefix)):
@@ -226,7 +233,7 @@ def _reload():
os.environ.get("PYTHONPATH", ""))
if not _has_execv:
subprocess.Popen([sys.executable] + argv)
- sys.exit(0)
+ os._exit(0)
else:
try:
os.execv(sys.executable, [sys.executable] + argv)
@@ -269,7 +276,17 @@ def main():
can catch import-time problems like syntax errors that would otherwise
prevent the script from reaching its call to `wait`.
"""
+ # Remember that we were launched with autoreload as main.
+ # The main module can be tricky; set the variables both in our globals
+ # (which may be __main__) and the real importable version.
+ import tornado.autoreload
+ global _autoreload_is_main
+ global _original_argv, _original_spec
+ tornado.autoreload._autoreload_is_main = _autoreload_is_main = True
original_argv = sys.argv
+ tornado.autoreload._original_argv = _original_argv = original_argv
+ original_spec = getattr(sys.modules['__main__'], '__spec__', None)
+ tornado.autoreload._original_spec = _original_spec = original_spec
sys.argv = sys.argv[:]
if len(sys.argv) >= 3 and sys.argv[1] == "-m":
mode = "module"
diff --git a/tornado/iostream.py b/tornado/iostream.py
index 89e1e234..63110a1a 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1410,13 +1410,7 @@ class IOStream(BaseIOStream):
return future
def _handle_connect(self):
- try:
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- except socket.error as e:
- # Hurd doesn't allow SO_ERROR for loopback sockets because all
- # errors for such sockets are reported synchronously.
- if errno_from_exception(e) == errno.ENOPROTOOPT:
- err = 0
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
diff --git a/tornado/netutil.py b/tornado/netutil.py
index e63683ad..08c9d886 100644
--- a/tornado/netutil.py
+++ b/tornado/netutil.py
@@ -138,12 +138,7 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR.
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
@@ -185,12 +180,7 @@ if hasattr(socket, 'AF_UNIX'):
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
diff --git a/tornado/web.py b/tornado/web.py
index 6760b0b9..f970bd13 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -749,18 +749,7 @@ class RequestHandler(object):
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
- """Renders the template with the given arguments as the response.
-
- ``render()`` calls ``finish()``, so no other output methods can be called
- after it.
-
- Returns a `.Future` with the same semantics as the one returned by `finish`.
- Awaiting this `.Future` is optional.
-
- .. versionchanged:: 5.1
-
- Now returns a `.Future` instead of ``None``.
- """
+ """Renders the template with the given arguments as the response."""
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
@@ -821,7 +810,7 @@ class RequestHandler(object):
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
- return self.finish(html)
+ self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
@@ -1004,20 +993,7 @@ class RequestHandler(object):
return future
def finish(self, chunk=None):
- """Finishes this response, ending the HTTP request.
-
- Passing a ``chunk`` to ``finish()`` is equivalent to passing that
- chunk to ``write()`` and then calling ``finish()`` with no arguments.
-
- Returns a `.Future` which may optionally be awaited to track the sending
- of the response to the client. This `.Future` resolves when all the response
- data has been sent, and raises an error if the connection is closed before all
- data can be sent.
-
- .. versionchanged:: 5.1
-
- Now returns a `.Future` instead of ``None``.
- """
+ """Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice")
@@ -1049,13 +1025,12 @@ class RequestHandler(object):
# are keepalive connections)
self.request.connection.set_close_callback(None)
- future = self.flush(include_footers=True)
+ self.flush(include_footers=True)
self.request.connection.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
- return future
def detach(self):
"""Take control of the underlying stream.
| autoreload: Fix argv preservation
`autoreload` currently has a wrapper mode (e.g. `python -m tornado.autoreload -m tornado.test`) for scripts, and an in-process mode (enabled by `Application(..., debug=True)`). It's useful to combine these, since the wrapper can catch syntax errors that cause the process to abort before entering its IOLoop. However, this doesn't work as well as it should, because the `main` wrapper only restores `sys.argv` if the process exits, meaning the `-m tornado.autoreload` flags are lost if the inner autoreload fires. The original argv needs to be stored in a global when `autoreload` is `__main__`, so that it can be used in `_reload()`. | tornadoweb/tornado | diff --git a/tornado/test/autoreload_test.py b/tornado/test/autoreload_test.py
index 6a9729db..1ea53167 100644
--- a/tornado/test/autoreload_test.py
+++ b/tornado/test/autoreload_test.py
@@ -1,14 +1,19 @@
from __future__ import absolute_import, division, print_function
import os
+import shutil
import subprocess
from subprocess import Popen
import sys
from tempfile import mkdtemp
+import time
from tornado.test.util import unittest
-MAIN = """\
+class AutoreloadTest(unittest.TestCase):
+
+ def test_reload_module(self):
+ main = """\
import os
import sys
@@ -24,15 +29,13 @@ if 'TESTAPP_STARTED' not in os.environ:
autoreload._reload()
"""
-
-class AutoreloadTest(unittest.TestCase):
- def test_reload_module(self):
# Create temporary test application
path = mkdtemp()
+ self.addCleanup(shutil.rmtree, path)
os.mkdir(os.path.join(path, 'testapp'))
open(os.path.join(path, 'testapp/__init__.py'), 'w').close()
with open(os.path.join(path, 'testapp/__main__.py'), 'w') as f:
- f.write(MAIN)
+ f.write(main)
# Make sure the tornado module under test is available to the test
# application
@@ -46,3 +49,64 @@ class AutoreloadTest(unittest.TestCase):
universal_newlines=True)
out = p.communicate()[0]
self.assertEqual(out, 'Starting\nStarting\n')
+
+ def test_reload_wrapper_preservation(self):
+ # This test verifies that when `python -m tornado.autoreload`
+ # is used on an application that also has an internal
+ # autoreload, the reload wrapper is preserved on restart.
+ main = """\
+import os
+import sys
+
+# This import will fail if path is not set up correctly
+import testapp
+
+if 'tornado.autoreload' not in sys.modules:
+ raise Exception('started without autoreload wrapper')
+
+import tornado.autoreload
+
+print('Starting')
+sys.stdout.flush()
+if 'TESTAPP_STARTED' not in os.environ:
+ os.environ['TESTAPP_STARTED'] = '1'
+ # Simulate an internal autoreload (one not caused
+ # by the wrapper).
+ tornado.autoreload._reload()
+else:
+ # Exit directly so autoreload doesn't catch it.
+ os._exit(0)
+"""
+
+ # Create temporary test application
+ path = mkdtemp()
+ os.mkdir(os.path.join(path, 'testapp'))
+ self.addCleanup(shutil.rmtree, path)
+ init_file = os.path.join(path, 'testapp', '__init__.py')
+ open(init_file, 'w').close()
+ main_file = os.path.join(path, 'testapp', '__main__.py')
+ with open(main_file, 'w') as f:
+ f.write(main)
+
+ # Make sure the tornado module under test is available to the test
+ # application
+ pythonpath = os.getcwd()
+ if 'PYTHONPATH' in os.environ:
+ pythonpath += os.pathsep + os.environ['PYTHONPATH']
+
+ autoreload_proc = Popen(
+ [sys.executable, '-m', 'tornado.autoreload', '-m', 'testapp'],
+ stdout=subprocess.PIPE, cwd=path,
+ env=dict(os.environ, PYTHONPATH=pythonpath),
+ universal_newlines=True)
+
+ for i in range(20):
+ if autoreload_proc.poll() is not None:
+ break
+ time.sleep(0.1)
+ else:
+ autoreload_proc.kill()
+ raise Exception("subprocess failed to terminate")
+
+ out = autoreload_proc.communicate()[0]
+ self.assertEqual(out, 'Starting\n' * 2)
diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py
index b77311df..45072aac 100644
--- a/tornado/test/web_test.py
+++ b/tornado/test/web_test.py
@@ -191,40 +191,6 @@ class SecureCookieV2Test(unittest.TestCase):
self.assertEqual(new_handler.get_secure_cookie('foo'), None)
-class FinalReturnTest(WebTestCase):
- def get_handlers(self):
- test = self
-
- class FinishHandler(RequestHandler):
- @gen.coroutine
- def get(self):
- test.final_return = self.finish()
-
- class RenderHandler(RequestHandler):
- def create_template_loader(self, path):
- return DictLoader({'foo.html': 'hi'})
-
- @gen.coroutine
- def get(self):
- test.final_return = self.render('foo.html')
-
- return [("/finish", FinishHandler),
- ("/render", RenderHandler)]
-
- def get_app_kwargs(self):
- return dict(template_path='FinalReturnTest')
-
- def test_finish_method_return_future(self):
- response = self.fetch(self.get_url('/finish'))
- self.assertEqual(response.code, 200)
- self.assertIsInstance(self.final_return, Future)
-
- def test_render_method_return_future(self):
- response = self.fetch(self.get_url('/render'))
- self.assertEqual(response.code, 200)
- self.assertIsInstance(self.final_return, Future)
-
-
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 4
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@eb487cac3d829292ecca6e5124b1da5ae6bba407#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/autoreload_test.py::AutoreloadTest::test_reload_wrapper_preservation"
] | [] | [
"tornado/test/autoreload_test.py::AutoreloadTest::test_reload_module",
"tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes",
"tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp",
"tornado/test/web_test.py::SecureCookieV1Test::test_round_trip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_round_trip",
"tornado/test/web_test.py::CookieTest::test_cookie_special_char",
"tornado/test/web_test.py::CookieTest::test_get_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie_domain",
"tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days",
"tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags",
"tornado/test/web_test.py::CookieTest::test_set_cookie_max_age",
"tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite",
"tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect",
"tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect",
"tornado/test/web_test.py::ConnectionCloseTest::test_connection_close",
"tornado/test/web_test.py::RequestEncodingTest::test_error",
"tornado/test/web_test.py::RequestEncodingTest::test_group_encoding",
"tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark",
"tornado/test/web_test.py::RequestEncodingTest::test_slashes",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_header_injection",
"tornado/test/web_test.py::WSGISafeWebTest::test_multi_header",
"tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip",
"tornado/test/web_test.py::WSGISafeWebTest::test_optional_path",
"tornado/test/web_test.py::WSGISafeWebTest::test_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url",
"tornado/test/web_test.py::WSGISafeWebTest::test_types",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash",
"tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush",
"tornado/test/web_test.py::NonWSGIWebTests::test_flow_control",
"tornado/test/web_test.py::ErrorResponseTest::test_default",
"tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error",
"tornado/test/web_test.py::ErrorResponseTest::test_write_error",
"tornado/test/web_test.py::StaticFileTest::test_absolute_static_url",
"tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_include_host_override",
"tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection",
"tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_root_static_path",
"tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_404",
"tornado/test/web_test.py::StaticFileTest::test_static_compressed_files",
"tornado/test/web_test.py::StaticFileTest::test_static_etag",
"tornado/test/web_test.py::StaticFileTest::test_static_files",
"tornado/test/web_test.py::StaticFileTest::test_static_head",
"tornado/test/web_test.py::StaticFileTest::test_static_head_range",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone",
"tornado/test/web_test.py::StaticFileTest::test_static_invalid_range",
"tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix",
"tornado/test/web_test.py::StaticFileTest::test_static_url",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect",
"tornado/test/web_test.py::StaticFileWithPathTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_static_url",
"tornado/test/web_test.py::HostMatchingTest::test_host_matching",
"tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching",
"tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups",
"tornado/test/web_test.py::ClearHeaderTest::test_clear_header",
"tornado/test/web_test.py::Header204Test::test_204_headers",
"tornado/test/web_test.py::Header304Test::test_304_headers",
"tornado/test/web_test.py::StatusReasonTest::test_status",
"tornado/test/web_test.py::DateHeaderTest::test_date_header",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil",
"tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf",
"tornado/test/web_test.py::GzipTestCase::test_gzip",
"tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested",
"tornado/test/web_test.py::GzipTestCase::test_gzip_static",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_kw",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_pos",
"tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies",
"tornado/test/web_test.py::ExceptionHandlerTest::test_http_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_known_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error",
"tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception",
"tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method",
"tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error",
"tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception",
"tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works",
"tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch",
"tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods",
"tornado/test/web_test.py::PatchMethodTest::test_other",
"tornado/test/web_test.py::PatchMethodTest::test_patch",
"tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare",
"tornado/test/web_test.py::Default404Test::test_404",
"tornado/test/web_test.py::Custom404Test::test_404",
"tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403",
"tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low",
"tornado/test/web_test.py::ClientCloseTest::test_client_close",
"tornado/test/web_test.py::SignedValueTest::test_expired",
"tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key",
"tornado/test/web_test.py::SignedValueTest::test_known_values",
"tornado/test/web_test.py::SignedValueTest::test_name_swap",
"tornado/test/web_test.py::SignedValueTest::test_non_ascii",
"tornado/test/web_test.py::SignedValueTest::test_payload_tampering",
"tornado/test/web_test.py::SignedValueTest::test_signature_tampering",
"tornado/test/web_test.py::XSRFTest::test_cross_user",
"tornado/test/web_test.py::XSRFTest::test_distinct_tokens",
"tornado/test/web_test.py::XSRFTest::test_refresh_token",
"tornado/test/web_test.py::XSRFTest::test_versioning",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_header",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token",
"tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly",
"tornado/test/web_test.py::FinishExceptionTest::test_finish_exception",
"tornado/test/web_test.py::DecoratorTest::test_addslash",
"tornado/test/web_test.py::DecoratorTest::test_removeslash",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_wildcard_etag",
"tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip",
"tornado/test/web_test.py::HTTPErrorTest::test_copy",
"tornado/test/web_test.py::ApplicationTest::test_listen",
"tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments",
"tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument"
] | [] | Apache License 2.0 | 2,553 | 1,916 | [
"tornado/autoreload.py",
"tornado/iostream.py",
"tornado/netutil.py",
"tornado/web.py"
] |
|
tornadoweb__tornado-2394 | 50800f37b72c7a401cd49c948cb5be85cabbafea | 2018-05-20 00:48:24 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | diff --git a/tornado/iostream.py b/tornado/iostream.py
index 89e1e234..63110a1a 100644
--- a/tornado/iostream.py
+++ b/tornado/iostream.py
@@ -1410,13 +1410,7 @@ class IOStream(BaseIOStream):
return future
def _handle_connect(self):
- try:
- err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- except socket.error as e:
- # Hurd doesn't allow SO_ERROR for loopback sockets because all
- # errors for such sockets are reported synchronously.
- if errno_from_exception(e) == errno.ENOPROTOOPT:
- err = 0
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
diff --git a/tornado/netutil.py b/tornado/netutil.py
index e63683ad..08c9d886 100644
--- a/tornado/netutil.py
+++ b/tornado/netutil.py
@@ -138,12 +138,7 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC,
raise
set_close_exec(sock.fileno())
if os.name != 'nt':
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR.
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
@@ -185,12 +180,7 @@ if hasattr(socket, 'AF_UNIX'):
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
- try:
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- except socket.error as e:
- if errno_from_exception(e) != errno.ENOPROTOOPT:
- # Hurd doesn't support SO_REUSEADDR
- raise
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
diff --git a/tornado/web.py b/tornado/web.py
index f970bd13..6760b0b9 100644
--- a/tornado/web.py
+++ b/tornado/web.py
@@ -749,7 +749,18 @@ class RequestHandler(object):
self._write_buffer.append(chunk)
def render(self, template_name, **kwargs):
- """Renders the template with the given arguments as the response."""
+ """Renders the template with the given arguments as the response.
+
+ ``render()`` calls ``finish()``, so no other output methods can be called
+ after it.
+
+ Returns a `.Future` with the same semantics as the one returned by `finish`.
+ Awaiting this `.Future` is optional.
+
+ .. versionchanged:: 5.1
+
+ Now returns a `.Future` instead of ``None``.
+ """
if self._finished:
raise RuntimeError("Cannot render() after finish()")
html = self.render_string(template_name, **kwargs)
@@ -810,7 +821,7 @@ class RequestHandler(object):
if html_bodies:
hloc = html.index(b'</body>')
html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
- self.finish(html)
+ return self.finish(html)
def render_linked_js(self, js_files):
"""Default method used to render the final js links for the
@@ -993,7 +1004,20 @@ class RequestHandler(object):
return future
def finish(self, chunk=None):
- """Finishes this response, ending the HTTP request."""
+ """Finishes this response, ending the HTTP request.
+
+ Passing a ``chunk`` to ``finish()`` is equivalent to passing that
+ chunk to ``write()`` and then calling ``finish()`` with no arguments.
+
+ Returns a `.Future` which may optionally be awaited to track the sending
+ of the response to the client. This `.Future` resolves when all the response
+ data has been sent, and raises an error if the connection is closed before all
+ data can be sent.
+
+ .. versionchanged:: 5.1
+
+ Now returns a `.Future` instead of ``None``.
+ """
if self._finished:
raise RuntimeError("finish() called twice")
@@ -1025,12 +1049,13 @@ class RequestHandler(object):
# are keepalive connections)
self.request.connection.set_close_callback(None)
- self.flush(include_footers=True)
+ future = self.flush(include_footers=True)
self.request.connection.finish()
self._log()
self._finished = True
self.on_finish()
self._break_cycles()
+ return future
def detach(self):
"""Take control of the underlying stream.
| RequestHandler.finish should return a Future
`RequestHandler.finish` may call `flush()`, which returns a Future, but this Future is simply discarded. The main reason for that Future is flow control in streaming responses, which is no longer relevant by the time we are closing the connection, but it also contains errors if the stream is closed while the response is streamed. This error will be logged as a stack trace if left uncaught, so some applications may wish to await their calls to `finish()` to be able to catch it.
This logic also extends to `render()`, which calls `finish()`.
From https://github.com/tornadoweb/tornado/issues/2055#issuecomment-304456147 | tornadoweb/tornado | diff --git a/tornado/test/web_test.py b/tornado/test/web_test.py
index 45072aac..b77311df 100644
--- a/tornado/test/web_test.py
+++ b/tornado/test/web_test.py
@@ -191,6 +191,40 @@ class SecureCookieV2Test(unittest.TestCase):
self.assertEqual(new_handler.get_secure_cookie('foo'), None)
+class FinalReturnTest(WebTestCase):
+ def get_handlers(self):
+ test = self
+
+ class FinishHandler(RequestHandler):
+ @gen.coroutine
+ def get(self):
+ test.final_return = self.finish()
+
+ class RenderHandler(RequestHandler):
+ def create_template_loader(self, path):
+ return DictLoader({'foo.html': 'hi'})
+
+ @gen.coroutine
+ def get(self):
+ test.final_return = self.render('foo.html')
+
+ return [("/finish", FinishHandler),
+ ("/render", RenderHandler)]
+
+ def get_app_kwargs(self):
+ return dict(template_path='FinalReturnTest')
+
+ def test_finish_method_return_future(self):
+ response = self.fetch(self.get_url('/finish'))
+ self.assertEqual(response.code, 200)
+ self.assertIsInstance(self.final_return, Future)
+
+ def test_render_method_return_future(self):
+ response = self.fetch(self.get_url('/render'))
+ self.assertEqual(response.code, 200)
+ self.assertIsInstance(self.final_return, Future)
+
+
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@50800f37b72c7a401cd49c948cb5be85cabbafea#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/web_test.py::FinalReturnTest::test_finish_method_return_future",
"tornado/test/web_test.py::FinalReturnTest::test_render_method_return_future"
] | [] | [
"tornado/test/web_test.py::SecureCookieV1Test::test_arbitrary_bytes",
"tornado/test/web_test.py::SecureCookieV1Test::test_cookie_tampering_future_timestamp",
"tornado/test/web_test.py::SecureCookieV1Test::test_round_trip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_increment_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_invalidate_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip",
"tornado/test/web_test.py::SecureCookieV2Test::test_key_version_roundtrip_differing_version",
"tornado/test/web_test.py::SecureCookieV2Test::test_round_trip",
"tornado/test/web_test.py::CookieTest::test_cookie_special_char",
"tornado/test/web_test.py::CookieTest::test_get_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie",
"tornado/test/web_test.py::CookieTest::test_set_cookie_domain",
"tornado/test/web_test.py::CookieTest::test_set_cookie_expires_days",
"tornado/test/web_test.py::CookieTest::test_set_cookie_false_flags",
"tornado/test/web_test.py::CookieTest::test_set_cookie_max_age",
"tornado/test/web_test.py::CookieTest::test_set_cookie_overwrite",
"tornado/test/web_test.py::AuthRedirectTest::test_absolute_auth_redirect",
"tornado/test/web_test.py::AuthRedirectTest::test_relative_auth_redirect",
"tornado/test/web_test.py::ConnectionCloseTest::test_connection_close",
"tornado/test/web_test.py::RequestEncodingTest::test_error",
"tornado/test/web_test.py::RequestEncodingTest::test_group_encoding",
"tornado/test/web_test.py::RequestEncodingTest::test_group_question_mark",
"tornado/test/web_test.py::RequestEncodingTest::test_slashes",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_invalid_unicode",
"tornado/test/web_test.py::WSGISafeWebTest::test_decode_argument_plus",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_argument",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_body_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_get_query_arguments",
"tornado/test/web_test.py::WSGISafeWebTest::test_header_injection",
"tornado/test/web_test.py::WSGISafeWebTest::test_multi_header",
"tornado/test/web_test.py::WSGISafeWebTest::test_no_gzip",
"tornado/test/web_test.py::WSGISafeWebTest::test_optional_path",
"tornado/test/web_test.py::WSGISafeWebTest::test_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_reverse_url",
"tornado/test/web_test.py::WSGISafeWebTest::test_types",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_resources",
"tornado/test/web_test.py::WSGISafeWebTest::test_uimodule_unescaped",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect",
"tornado/test/web_test.py::WSGISafeWebTest::test_web_redirect_double_slash",
"tornado/test/web_test.py::NonWSGIWebTests::test_empty_flush",
"tornado/test/web_test.py::NonWSGIWebTests::test_flow_control",
"tornado/test/web_test.py::ErrorResponseTest::test_default",
"tornado/test/web_test.py::ErrorResponseTest::test_failed_write_error",
"tornado/test/web_test.py::ErrorResponseTest::test_write_error",
"tornado/test/web_test.py::StaticFileTest::test_absolute_static_url",
"tornado/test/web_test.py::StaticFileTest::test_absolute_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_include_host_override",
"tornado/test/web_test.py::StaticFileTest::test_path_traversal_protection",
"tornado/test/web_test.py::StaticFileTest::test_relative_version_exclusion",
"tornado/test/web_test.py::StaticFileTest::test_root_static_path",
"tornado/test/web_test.py::StaticFileTest::test_static_304_etag_modified_bug",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_modified_since",
"tornado/test/web_test.py::StaticFileTest::test_static_304_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_404",
"tornado/test/web_test.py::StaticFileTest::test_static_compressed_files",
"tornado/test/web_test.py::StaticFileTest::test_static_etag",
"tornado/test/web_test.py::StaticFileTest::test_static_files",
"tornado/test/web_test.py::StaticFileTest::test_static_head",
"tornado/test/web_test.py::StaticFileTest::test_static_head_range",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_pre_epoch",
"tornado/test/web_test.py::StaticFileTest::test_static_if_modified_since_time_zone",
"tornado/test/web_test.py::StaticFileTest::test_static_invalid_range",
"tornado/test/web_test.py::StaticFileTest::test_static_range_if_none_match",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_invalid_start",
"tornado/test/web_test.py::StaticFileTest::test_static_unsatisfiable_range_zero_suffix",
"tornado/test/web_test.py::StaticFileTest::test_static_url",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_end_edge",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_file",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_full_past_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_neg_end",
"tornado/test/web_test.py::StaticFileTest::test_static_with_range_partial_past_end",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_filename",
"tornado/test/web_test.py::StaticDefaultFilenameTest::test_static_default_redirect",
"tornado/test/web_test.py::StaticFileWithPathTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_serve",
"tornado/test/web_test.py::CustomStaticFileTest::test_static_url",
"tornado/test/web_test.py::HostMatchingTest::test_host_matching",
"tornado/test/web_test.py::DefaultHostMatchingTest::test_default_host_matching",
"tornado/test/web_test.py::NamedURLSpecGroupsTest::test_named_urlspec_groups",
"tornado/test/web_test.py::ClearHeaderTest::test_clear_header",
"tornado/test/web_test.py::Header204Test::test_204_headers",
"tornado/test/web_test.py::Header304Test::test_304_headers",
"tornado/test/web_test.py::StatusReasonTest::test_status",
"tornado/test/web_test.py::DateHeaderTest::test_date_header",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str",
"tornado/test/web_test.py::RaiseWithReasonTest::test_httperror_str_from_httputil",
"tornado/test/web_test.py::RaiseWithReasonTest::test_raise_with_reason",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_404_xsrf",
"tornado/test/web_test.py::ErrorHandlerXSRFTest::test_error_xsrf",
"tornado/test/web_test.py::GzipTestCase::test_gzip",
"tornado/test/web_test.py::GzipTestCase::test_gzip_not_requested",
"tornado/test/web_test.py::GzipTestCase::test_gzip_static",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present",
"tornado/test/web_test.py::GzipTestCase::test_vary_already_present_multiple",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_kw",
"tornado/test/web_test.py::PathArgsInPrepareTest::test_pos",
"tornado/test/web_test.py::ClearAllCookiesTest::test_clear_all_cookies",
"tornado/test/web_test.py::ExceptionHandlerTest::test_http_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_known_error",
"tornado/test/web_test.py::ExceptionHandlerTest::test_unknown_error",
"tornado/test/web_test.py::BuggyLoggingTest::test_buggy_log_exception",
"tornado/test/web_test.py::UIMethodUIModuleTest::test_ui_method",
"tornado/test/web_test.py::GetArgumentErrorTest::test_catch_error",
"tornado/test/web_test.py::MultipleExceptionTest::test_multi_exception",
"tornado/test/web_test.py::SetLazyPropertiesTest::test_set_properties",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_is_lazy",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_from_ui_module_works",
"tornado/test/web_test.py::GetCurrentUserTest::test_get_current_user_works",
"tornado/test/web_test.py::UnimplementedHTTPMethodsTest::test_unimplemented_standard_methods",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_other",
"tornado/test/web_test.py::UnimplementedNonStandardMethodsTest::test_unimplemented_patch",
"tornado/test/web_test.py::AllHTTPMethodsTest::test_standard_methods",
"tornado/test/web_test.py::PatchMethodTest::test_other",
"tornado/test/web_test.py::PatchMethodTest::test_patch",
"tornado/test/web_test.py::FinishInPrepareTest::test_finish_in_prepare",
"tornado/test/web_test.py::Default404Test::test_404",
"tornado/test/web_test.py::Custom404Test::test_404",
"tornado/test/web_test.py::DefaultHandlerArgumentsTest::test_403",
"tornado/test/web_test.py::HandlerByNameTest::test_handler_by_name",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_close_during_upload",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_early_return_with_data",
"tornado/test/web_test.py::StreamingRequestBodyTest::test_streaming_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::DecoratedStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_chunked_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_compressed_body",
"tornado/test/web_test.py::NativeStreamingRequestFlowControlTest::test_flow_control_fixed_body",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_high",
"tornado/test/web_test.py::IncorrectContentLengthTest::test_content_length_too_low",
"tornado/test/web_test.py::ClientCloseTest::test_client_close",
"tornado/test/web_test.py::SignedValueTest::test_expired",
"tornado/test/web_test.py::SignedValueTest::test_key_version_retrieval",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_invalid_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_default_key",
"tornado/test/web_test.py::SignedValueTest::test_key_versioning_read_write_non_default_key",
"tornado/test/web_test.py::SignedValueTest::test_known_values",
"tornado/test/web_test.py::SignedValueTest::test_name_swap",
"tornado/test/web_test.py::SignedValueTest::test_non_ascii",
"tornado/test/web_test.py::SignedValueTest::test_payload_tampering",
"tornado/test/web_test.py::SignedValueTest::test_signature_tampering",
"tornado/test/web_test.py::XSRFTest::test_cross_user",
"tornado/test/web_test.py::XSRFTest::test_distinct_tokens",
"tornado/test/web_test.py::XSRFTest::test_refresh_token",
"tornado/test/web_test.py::XSRFTest::test_versioning",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_argument_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_body_no_cookie",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_invalid_format",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_cookie_no_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_fail_no_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_header",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_non_hex_token",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_post_body",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_query_string",
"tornado/test/web_test.py::XSRFTest::test_xsrf_success_short_token",
"tornado/test/web_test.py::XSRFCookieKwargsTest::test_xsrf_httponly",
"tornado/test/web_test.py::FinishExceptionTest::test_finish_exception",
"tornado/test/web_test.py::DecoratorTest::test_addslash",
"tornado/test/web_test.py::DecoratorTest::test_removeslash",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_multiple_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_match",
"tornado/test/web_test.py::CacheTest::test_strong_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_match",
"tornado/test/web_test.py::CacheTest::test_weak_etag_not_match",
"tornado/test/web_test.py::CacheTest::test_wildcard_etag",
"tornado/test/web_test.py::RequestSummaryTest::test_missing_remote_ip",
"tornado/test/web_test.py::HTTPErrorTest::test_copy",
"tornado/test/web_test.py::ApplicationTest::test_listen",
"tornado/test/web_test.py::URLSpecReverseTest::test_non_reversible",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse",
"tornado/test/web_test.py::URLSpecReverseTest::test_reverse_arguments",
"tornado/test/web_test.py::RedirectHandlerTest::test_basic_redirect",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_pattern",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_appending_argument",
"tornado/test/web_test.py::RedirectHandlerTest::test_redirect_with_argument"
] | [] | Apache License 2.0 | 2,554 | 1,235 | [
"tornado/iostream.py",
"tornado/netutil.py",
"tornado/web.py"
] |
|
marshmallow-code__marshmallow-821 | bfc6bedf291bb54f8623acc9380139c06bc8acb2 | 2018-05-20 02:24:22 | 8e217c8d6fefb7049ab3389f31a8d35824fa2d96 | diff --git a/marshmallow/fields.py b/marshmallow/fields.py
index ecfd28d4..737fbfb0 100755
--- a/marshmallow/fields.py
+++ b/marshmallow/fields.py
@@ -1134,6 +1134,15 @@ class Dict(Field):
'marshmallow.base.FieldABC')
self.key_container = keys
+ def _add_to_schema(self, field_name, schema):
+ super(Dict, self)._add_to_schema(field_name, schema)
+ if self.value_container:
+ self.value_container.parent = self
+ self.value_container.name = field_name
+ if self.key_container:
+ self.key_container.parent = self
+ self.key_container.name = field_name
+
def _serialize(self, value, attr, obj):
if value is None:
return None
| Question: How can I pass the context in a nested field of a structured dict?
I noticed that if you use a nested field for values in a structured Dict, the context is not automatically given to the nested schema. Is there a way to pass it the context?
Example:
```python
class Inner(Schema):
foo = fields.String()
@validates('foo')
def validate_foo(self, value):
if 'foo_context' not in self.context:
raise ValidationError('no context!')
class Outer(Schema):
bar = fields.Dict(values=fields.Nested(Inner))
# gives no error:
Inner(context={'foo_context': 'foo'}).load({'foo': 'some foo'})
# gives 'no context!' error:
Outer(context={'foo_context': 'foo'}).load({'bar': { 'key': {'foo': 'some foo'}}})
``` | marshmallow-code/marshmallow | diff --git a/tests/test_schema.py b/tests/test_schema.py
index 17c04300..9fee0d63 100755
--- a/tests/test_schema.py
+++ b/tests/test_schema.py
@@ -2134,6 +2134,27 @@ class TestContext:
outer.context['foo_context'] = 'foo'
assert outer.load({'bars': [{'foo': 42}]})
+ # Regression test for https://github.com/marshmallow-code/marshmallow/issues/820
+ def test_nested_dict_fields_inherit_context(self):
+ class InnerSchema(Schema):
+ foo = fields.Field()
+
+ @validates('foo')
+ def validate_foo(self, value):
+ if 'foo_context' not in self.context:
+ raise ValidationError('Missing context')
+
+ class OuterSchema(Schema):
+ bars = fields.Dict(values=fields.Nested(InnerSchema()))
+
+ inner = InnerSchema()
+ inner.context['foo_context'] = 'foo'
+ assert inner.load({'foo': 42})
+
+ outer = OuterSchema()
+ outer.context['foo_context'] = 'foo'
+ assert outer.load({'bars': {'test': {'foo': 42}}})
+
def test_serializer_can_specify_nested_object_as_attribute(blog):
class BlogUsernameSchema(Schema):
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 3.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[reco]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"dev-requirements.txt",
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==25.3.0
coverage==7.8.0
distlib==0.3.9
exceptiongroup==1.2.2
execnet==2.1.1
filelock==3.18.0
flake8==3.5.0
iniconfig==2.1.0
invoke==1.0.0
-e git+https://github.com/marshmallow-code/marshmallow.git@bfc6bedf291bb54f8623acc9380139c06bc8acb2#egg=marshmallow
mccabe==0.6.1
more-itertools==10.6.0
packaging==24.2
platformdirs==4.3.7
pluggy==1.5.0
py==1.11.0
pycodestyle==2.3.1
pyflakes==1.6.0
pytest==8.3.5
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
python-dateutil==2.7.3
pytz==2018.4
simplejson==3.15.0
six==1.17.0
toml==0.10.2
tomli==2.2.1
tox==3.12.1
typing_extensions==4.13.0
virtualenv==20.29.3
| name: marshmallow
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==25.3.0
- coverage==7.8.0
- distlib==0.3.9
- exceptiongroup==1.2.2
- execnet==2.1.1
- filelock==3.18.0
- flake8==3.5.0
- iniconfig==2.1.0
- invoke==1.0.0
- mccabe==0.6.1
- more-itertools==10.6.0
- packaging==24.2
- platformdirs==4.3.7
- pluggy==1.5.0
- py==1.11.0
- pycodestyle==2.3.1
- pyflakes==1.6.0
- pytest==8.3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- python-dateutil==2.7.3
- pytz==2018.4
- simplejson==3.15.0
- six==1.17.0
- toml==0.10.2
- tomli==2.2.1
- tox==3.12.1
- typing-extensions==4.13.0
- virtualenv==20.29.3
prefix: /opt/conda/envs/marshmallow
| [
"tests/test_schema.py::TestContext::test_nested_dict_fields_inherit_context"
] | [] | [
"tests/test_schema.py::test_serializing_basic_object[UserSchema]",
"tests/test_schema.py::test_serializing_basic_object[UserMetaSchema]",
"tests/test_schema.py::test_serializer_dump",
"tests/test_schema.py::test_dump_raises_with_dict_of_errors",
"tests/test_schema.py::test_dump_mode_raises_error[UserSchema]",
"tests/test_schema.py::test_dump_mode_raises_error[UserMetaSchema]",
"tests/test_schema.py::test_dump_resets_errors",
"tests/test_schema.py::test_load_resets_errors",
"tests/test_schema.py::test_load_validation_error_stores_input_data_and_valid_data",
"tests/test_schema.py::test_dump_validation_error_stores_partially_valid_data",
"tests/test_schema.py::test_dump_resets_error_fields",
"tests/test_schema.py::test_load_resets_error_fields",
"tests/test_schema.py::test_load_resets_error_kwargs",
"tests/test_schema.py::test_errored_fields_do_not_appear_in_output",
"tests/test_schema.py::test_load_many_stores_error_indices",
"tests/test_schema.py::test_dump_many",
"tests/test_schema.py::test_multiple_errors_can_be_stored_for_a_given_index",
"tests/test_schema.py::test_dump_many_stores_error_indices",
"tests/test_schema.py::test_dump_many_doesnt_stores_error_indices_when_index_errors_is_false",
"tests/test_schema.py::test_dump_returns_a_dict",
"tests/test_schema.py::test_dumps_returns_a_string",
"tests/test_schema.py::test_dumping_single_object_with_collection_schema",
"tests/test_schema.py::test_loading_single_object_with_collection_schema",
"tests/test_schema.py::test_dumps_many",
"tests/test_schema.py::test_load_returns_an_object",
"tests/test_schema.py::test_load_many",
"tests/test_schema.py::test_loads_returns_a_user",
"tests/test_schema.py::test_loads_many",
"tests/test_schema.py::test_loads_deserializes_from_json",
"tests/test_schema.py::test_serializing_none",
"tests/test_schema.py::test_default_many_symmetry",
"tests/test_schema.py::test_on_bind_field_hook",
"tests/test_schema.py::test_nested_on_bind_field_hook",
"tests/test_schema.py::TestValidate::test_validate_raises_with_errors_dict",
"tests/test_schema.py::TestValidate::test_validate_many",
"tests/test_schema.py::TestValidate::test_validate_many_doesnt_store_index_if_index_errors_option_is_false",
"tests/test_schema.py::TestValidate::test_validate",
"tests/test_schema.py::TestValidate::test_validate_required",
"tests/test_schema.py::test_fields_are_not_copies[UserSchema]",
"tests/test_schema.py::test_fields_are_not_copies[UserMetaSchema]",
"tests/test_schema.py::test_dumps_returns_json",
"tests/test_schema.py::test_naive_datetime_field",
"tests/test_schema.py::test_datetime_formatted_field",
"tests/test_schema.py::test_datetime_iso_field",
"tests/test_schema.py::test_tz_datetime_field",
"tests/test_schema.py::test_local_datetime_field",
"tests/test_schema.py::test_class_variable",
"tests/test_schema.py::test_serialize_many[UserSchema]",
"tests/test_schema.py::test_serialize_many[UserMetaSchema]",
"tests/test_schema.py::test_inheriting_schema",
"tests/test_schema.py::test_custom_field",
"tests/test_schema.py::test_url_field",
"tests/test_schema.py::test_relative_url_field",
"tests/test_schema.py::test_stores_invalid_url_error[UserSchema]",
"tests/test_schema.py::test_stores_invalid_url_error[UserMetaSchema]",
"tests/test_schema.py::test_email_field[UserSchema]",
"tests/test_schema.py::test_email_field[UserMetaSchema]",
"tests/test_schema.py::test_stored_invalid_email",
"tests/test_schema.py::test_integer_field",
"tests/test_schema.py::test_as_string",
"tests/test_schema.py::test_method_field[UserSchema]",
"tests/test_schema.py::test_method_field[UserMetaSchema]",
"tests/test_schema.py::test_function_field",
"tests/test_schema.py::test_prefix[UserSchema]",
"tests/test_schema.py::test_prefix[UserMetaSchema]",
"tests/test_schema.py::test_fields_must_be_declared_as_instances",
"tests/test_schema.py::test_serializing_generator[UserSchema]",
"tests/test_schema.py::test_serializing_generator[UserMetaSchema]",
"tests/test_schema.py::test_serializing_empty_list_returns_empty_list",
"tests/test_schema.py::test_serializing_dict",
"tests/test_schema.py::test_serializing_dict_with_meta_fields",
"tests/test_schema.py::test_exclude_in_init[UserSchema]",
"tests/test_schema.py::test_exclude_in_init[UserMetaSchema]",
"tests/test_schema.py::test_only_in_init[UserSchema]",
"tests/test_schema.py::test_only_in_init[UserMetaSchema]",
"tests/test_schema.py::test_invalid_only_param",
"tests/test_schema.py::test_can_serialize_uuid",
"tests/test_schema.py::test_can_serialize_time",
"tests/test_schema.py::test_invalid_time",
"tests/test_schema.py::test_invalid_date",
"tests/test_schema.py::test_invalid_dict_but_okay",
"tests/test_schema.py::test_json_module_is_deprecated",
"tests/test_schema.py::test_render_module",
"tests/test_schema.py::test_custom_error_message",
"tests/test_schema.py::test_load_errors_with_many",
"tests/test_schema.py::test_error_raised_if_fields_option_is_not_list",
"tests/test_schema.py::test_error_raised_if_additional_option_is_not_list",
"tests/test_schema.py::test_nested_custom_set_in_exclude_reusing_schema",
"tests/test_schema.py::test_nested_only",
"tests/test_schema.py::test_nested_only_inheritance",
"tests/test_schema.py::test_nested_only_empty_inheritance",
"tests/test_schema.py::test_nested_exclude",
"tests/test_schema.py::test_nested_exclude_inheritance",
"tests/test_schema.py::test_nested_only_and_exclude",
"tests/test_schema.py::test_nested_only_then_exclude_inheritance",
"tests/test_schema.py::test_nested_exclude_then_only_inheritance",
"tests/test_schema.py::test_nested_exclude_and_only_inheritance",
"tests/test_schema.py::test_meta_nested_exclude",
"tests/test_schema.py::test_nested_custom_set_not_implementing_getitem",
"tests/test_schema.py::test_deeply_nested_only_and_exclude",
"tests/test_schema.py::TestDeeplyNestedLoadOnly::test_load_only",
"tests/test_schema.py::TestDeeplyNestedLoadOnly::test_dump_only",
"tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_load_only",
"tests/test_schema.py::TestDeeplyNestedListLoadOnly::test_dump_only",
"tests/test_schema.py::test_nested_constructor_only_and_exclude",
"tests/test_schema.py::test_only_and_exclude",
"tests/test_schema.py::test_exclude_invalid_attribute",
"tests/test_schema.py::test_only_with_invalid_attribute",
"tests/test_schema.py::test_only_bounded_by_fields",
"tests/test_schema.py::test_only_empty",
"tests/test_schema.py::test_nested_with_sets",
"tests/test_schema.py::test_meta_serializer_fields",
"tests/test_schema.py::test_meta_fields_mapping",
"tests/test_schema.py::test_meta_field_not_on_obj_raises_attribute_error",
"tests/test_schema.py::test_exclude_fields",
"tests/test_schema.py::test_fields_option_must_be_list_or_tuple",
"tests/test_schema.py::test_exclude_option_must_be_list_or_tuple",
"tests/test_schema.py::test_dateformat_option",
"tests/test_schema.py::test_default_dateformat",
"tests/test_schema.py::test_inherit_meta",
"tests/test_schema.py::test_inherit_meta_override",
"tests/test_schema.py::test_additional",
"tests/test_schema.py::test_cant_set_both_additional_and_fields",
"tests/test_schema.py::test_serializing_none_meta",
"tests/test_schema.py::TestHandleError::test_dump_with_custom_error_handler",
"tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler",
"tests/test_schema.py::TestHandleError::test_load_with_custom_error_handler_and_partially_valid_data",
"tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_decorator",
"tests/test_schema.py::TestHandleError::test_custom_error_handler_with_validates_schema_decorator",
"tests/test_schema.py::TestHandleError::test_validate_with_custom_error_handler",
"tests/test_schema.py::TestFieldValidation::test_errors_are_cleared_after_loading_collection",
"tests/test_schema.py::TestFieldValidation::test_raises_error_with_list",
"tests/test_schema.py::TestFieldValidation::test_raises_error_with_dict",
"tests/test_schema.py::TestFieldValidation::test_ignored_if_not_in_only",
"tests/test_schema.py::test_schema_repr",
"tests/test_schema.py::TestNestedSchema::test_flat_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_with_missing_attribute",
"tests/test_schema.py::TestNestedSchema::test_nested_with_attribute_none",
"tests/test_schema.py::TestNestedSchema::test_flat_nested2",
"tests/test_schema.py::TestNestedSchema::test_nested_field_does_not_validate_required",
"tests/test_schema.py::TestNestedSchema::test_nested_none",
"tests/test_schema.py::TestNestedSchema::test_nested",
"tests/test_schema.py::TestNestedSchema::test_nested_many_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_meta_many",
"tests/test_schema.py::TestNestedSchema::test_nested_only",
"tests/test_schema.py::TestNestedSchema::test_exclude",
"tests/test_schema.py::TestNestedSchema::test_list_field",
"tests/test_schema.py::TestNestedSchema::test_nested_load_many",
"tests/test_schema.py::TestNestedSchema::test_nested_errors",
"tests/test_schema.py::TestNestedSchema::test_nested_dump_errors",
"tests/test_schema.py::TestNestedSchema::test_nested_dump",
"tests/test_schema.py::TestNestedSchema::test_nested_method_field",
"tests/test_schema.py::TestNestedSchema::test_nested_function_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_field",
"tests/test_schema.py::TestNestedSchema::test_nested_prefixed_many_field",
"tests/test_schema.py::TestNestedSchema::test_invalid_float_field",
"tests/test_schema.py::TestNestedSchema::test_serializer_meta_with_nested_fields",
"tests/test_schema.py::TestNestedSchema::test_serializer_with_nested_meta_fields",
"tests/test_schema.py::TestNestedSchema::test_nested_fields_must_be_passed_a_serializer",
"tests/test_schema.py::TestNestedSchema::test_invalid_type_passed_to_nested_field",
"tests/test_schema.py::TestNestedSchema::test_all_errors_on_many_nested_field_with_validates_decorator",
"tests/test_schema.py::TestNestedSchema::test_dump_validation_error",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_within_itself",
"tests/test_schema.py::TestSelfReference::test_nesting_schema_by_passing_class_name",
"tests/test_schema.py::TestSelfReference::test_nesting_within_itself_meta",
"tests/test_schema.py::TestSelfReference::test_nested_self_with_only_param",
"tests/test_schema.py::TestSelfReference::test_multiple_nested_self_fields",
"tests/test_schema.py::TestSelfReference::test_nested_many",
"tests/test_schema.py::test_serialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field",
"tests/test_schema.py::test_deserialization_with_required_field_and_custom_validator",
"tests/test_schema.py::TestContext::test_context_method",
"tests/test_schema.py::TestContext::test_context_method_function",
"tests/test_schema.py::TestContext::test_function_field_raises_error_when_context_not_available",
"tests/test_schema.py::TestContext::test_function_field_handles_bound_serializer",
"tests/test_schema.py::TestContext::test_fields_context",
"tests/test_schema.py::TestContext::test_nested_fields_inherit_context",
"tests/test_schema.py::TestContext::test_nested_list_fields_inherit_context",
"tests/test_schema.py::test_serializer_can_specify_nested_object_as_attribute",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inherit_fields_from_non_schema_subclass",
"tests/test_schema.py::TestFieldInheritance::test_inheritance_follows_mro",
"tests/test_schema.py::TestGetAttribute::test_get_attribute_is_used",
"tests/test_schema.py::TestGetAttribute::test_get_attribute_with_many",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_missing",
"tests/test_schema.py::TestRequiredFields::test_required_string_field_failure",
"tests/test_schema.py::TestRequiredFields::test_allow_none_param",
"tests/test_schema.py::TestRequiredFields::test_allow_none_custom_message",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_dump_output",
"tests/test_schema.py::TestDefaults::test_none_is_serialized_to_none",
"tests/test_schema.py::TestDefaults::test_default_and_value_missing",
"tests/test_schema.py::TestDefaults::test_loading_none",
"tests/test_schema.py::TestDefaults::test_missing_inputs_are_excluded_from_load_output",
"tests/test_schema.py::TestLoadOnly::test_load_only",
"tests/test_schema.py::TestLoadOnly::test_dump_only",
"tests/test_schema.py::TestLoadOnly::test_url_field_requre_tld_false"
] | [] | MIT License | 2,555 | 203 | [
"marshmallow/fields.py"
] |
|
tornadoweb__tornado-2397 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | 2018-05-20 18:39:50 | 6410cd98c1a5e938246a17cac0769f689ed471c5 | ploxiln: functionally looks great | diff --git a/tornado/curl_httpclient.py b/tornado/curl_httpclient.py
index 54fc5b36..ef98225c 100644
--- a/tornado/curl_httpclient.py
+++ b/tornado/curl_httpclient.py
@@ -348,8 +348,8 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
- credentials = '%s:%s' % (request.proxy_username,
- request.proxy_password)
+ credentials = httputil.encode_username_password(request.proxy_username,
+ request.proxy_password)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
if (request.proxy_auth_mode is None or
@@ -441,8 +441,6 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
curl.setopt(pycurl.INFILESIZE, len(request.body or ''))
if request.auth_username is not None:
- userpwd = "%s:%s" % (request.auth_username, request.auth_password or '')
-
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
@@ -450,7 +448,9 @@ class CurlAsyncHTTPClient(AsyncHTTPClient):
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
- curl.setopt(pycurl.USERPWD, native_str(userpwd))
+ userpwd = httputil.encode_username_password(request.auth_username,
+ request.auth_password)
+ curl.setopt(pycurl.USERPWD, userpwd)
curl_log.debug("%s %s (username: %r)", request.method, request.url,
request.auth_username)
else:
diff --git a/tornado/httputil.py b/tornado/httputil.py
index 22a64c31..d1ace5a8 100644
--- a/tornado/httputil.py
+++ b/tornado/httputil.py
@@ -29,11 +29,12 @@ import email.utils
import numbers
import re
import time
+import unicodedata
import warnings
from tornado.escape import native_str, parse_qs_bytes, utf8
from tornado.log import gen_log
-from tornado.util import ObjectDict, PY3
+from tornado.util import ObjectDict, PY3, unicode_type
if PY3:
import http.cookies as Cookie
@@ -949,6 +950,20 @@ def _encode_header(key, pdict):
return '; '.join(out)
+def encode_username_password(username, password):
+ """Encodes a username/password pair in the format used by HTTP auth.
+
+ The return value is a byte string in the form ``username:password``.
+
+ .. versionadded:: 5.1
+ """
+ if isinstance(username, unicode_type):
+ username = unicodedata.normalize('NFC', username)
+ if isinstance(password, unicode_type):
+ password = unicodedata.normalize('NFC', password)
+ return utf8(username) + b":" + utf8(password)
+
+
def doctests():
import doctest
return doctest.DocTestSuite()
diff --git a/tornado/simple_httpclient.py b/tornado/simple_httpclient.py
index 4df4898a..35c71936 100644
--- a/tornado/simple_httpclient.py
+++ b/tornado/simple_httpclient.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from tornado.escape import utf8, _unicode
+from tornado.escape import _unicode
from tornado import gen
from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy
from tornado import httputil
@@ -308,9 +308,9 @@ class _HTTPConnection(httputil.HTTPMessageDelegate):
if self.request.auth_mode not in (None, "basic"):
raise ValueError("unsupported auth_mode %s",
self.request.auth_mode)
- auth = utf8(username) + b":" + utf8(password)
- self.request.headers["Authorization"] = (b"Basic " +
- base64.b64encode(auth))
+ self.request.headers["Authorization"] = (
+ b"Basic " + base64.b64encode(
+ httputil.encode_username_password(username, password)))
if self.request.user_agent:
self.request.headers["User-Agent"] = self.request.user_agent
if not self.request.allow_nonstandard_methods:
| Unable to use non-ascii characters in user/password for basic auth in curl_httpclient
Steps to reproduce (Python 3.4):
1. Create tornado.httpclient.HTTPRequest with auth_username or auth_password which contains non-ascii (lower range, 0-128), for example pound sterling £ (which is 153 in ascii).
2. Execute curl_httpclient fetch using that request
Expected result:
1. The request is successfully completed
Actual result:
2. HTTP 599 is returned and internal exception is:
'ascii' codec can't encode character '\xa3' in position 55: ordinal not in range(128)
I am not sure if I am not aware of the proper solution, but I have tried providing bytes as auth_password, but it does not solve the issue because https://github.com/tornadoweb/tornado/blob/master/tornado/curl_httpclient.py#L438 internally uses string formatting. Reading through pycurl docs (http://pycurl.io/docs/latest/unicode.html) suggests that for Python3 bytes array should be used when using curl setopt. It seems like Python3 vs Python2 issue? | tornadoweb/tornado | diff --git a/tornado/test/curl_httpclient_test.py b/tornado/test/curl_httpclient_test.py
index b7a85952..4230d4cd 100644
--- a/tornado/test/curl_httpclient_test.py
+++ b/tornado/test/curl_httpclient_test.py
@@ -32,13 +32,15 @@ class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
class DigestAuthHandler(RequestHandler):
+ def initialize(self, username, password):
+ self.username = username
+ self.password = password
+
def get(self):
realm = 'test'
opaque = 'asdf'
# Real implementations would use a random nonce.
nonce = "1234"
- username = 'foo'
- password = 'bar'
auth_header = self.request.headers.get('Authorization', None)
if auth_header is not None:
@@ -53,9 +55,9 @@ class DigestAuthHandler(RequestHandler):
assert param_dict['realm'] == realm
assert param_dict['opaque'] == opaque
assert param_dict['nonce'] == nonce
- assert param_dict['username'] == username
+ assert param_dict['username'] == self.username
assert param_dict['uri'] == self.request.path
- h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
+ h1 = md5(utf8('%s:%s:%s' % (self.username, realm, self.password))).hexdigest()
h2 = md5(utf8('%s:%s' % (self.request.method,
self.request.path))).hexdigest()
digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
@@ -88,7 +90,8 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
- ('/digest', DigestAuthHandler),
+ ('/digest', DigestAuthHandler, {'username': 'foo', 'password': 'bar'}),
+ ('/digest_non_ascii', DigestAuthHandler, {'username': 'foo', 'password': 'barユ£'}),
('/custom_reason', CustomReasonHandler),
('/custom_fail_reason', CustomFailReasonHandler),
])
@@ -143,3 +146,8 @@ class CurlHTTPClientTestCase(AsyncHTTPTestCase):
# during the setup phase doesn't lead the request to
# be dropped on the floor.
response = self.fetch(u'/ユニコード', raise_error=True)
+
+ def test_digest_auth_non_ascii(self):
+ response = self.fetch('/digest_non_ascii', auth_mode='digest',
+ auth_username='foo', auth_password='barユ£')
+ self.assertEqual(response.body, b'ok')
diff --git a/tornado/test/httpclient_test.py b/tornado/test/httpclient_test.py
index 60c8f490..fb8b12d5 100644
--- a/tornado/test/httpclient_test.py
+++ b/tornado/test/httpclient_test.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import base64
@@ -8,6 +9,7 @@ import sys
import threading
import datetime
from io import BytesIO
+import unicodedata
from tornado.escape import utf8, native_str
from tornado import gen
@@ -237,6 +239,7 @@ Transfer-Encoding: chunked
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
+ # This test data appears in section 2 of RFC 7617.
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
@@ -247,6 +250,20 @@ Transfer-Encoding: chunked
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
+ def test_basic_auth_unicode(self):
+ # This test data appears in section 2.1 of RFC 7617.
+ self.assertEqual(self.fetch("/auth", auth_username="test",
+ auth_password="123£").body,
+ b"Basic dGVzdDoxMjPCow==")
+
+ # The standard mandates NFC. Give it a decomposed username
+ # and ensure it is normalized to composed form.
+ username = unicodedata.normalize("NFD", u"josé")
+ self.assertEqual(self.fetch("/auth",
+ auth_username=username,
+ auth_password="səcrət").body,
+ b"Basic am9zw6k6c8mZY3LJmXQ=")
+
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | 5.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"flake8"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
flake8==5.0.4
importlib-metadata==4.2.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
mccabe==0.7.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pycodestyle==2.9.1
pyflakes==2.5.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
-e git+https://github.com/tornadoweb/tornado.git@6410cd98c1a5e938246a17cac0769f689ed471c5#egg=tornado
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: tornado
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- flake8==5.0.4
- importlib-metadata==4.2.0
- mccabe==0.7.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
prefix: /opt/conda/envs/tornado
| [
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_unicode"
] | [] | [
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_304_with_content_length",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_all_methods",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_basic_auth_explicit_mode",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_encoding",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_body_sanity_checks",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_chunked_close",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_configure_defaults",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_credentials_in_url",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_final_callback_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_follow_redirect",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_http_error_no_raise",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_future_interface",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_callback_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_header_types",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_hello_world",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_multi_line_headers",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_non_ascii_header",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_patch_receives_payload",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_post",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_put_307",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_reuse_request_from_response",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_callback",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_streaming_stack_context",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_types",
"tornado/test/httpclient_test.py::HTTPClientCommonTestCase::test_unsupported_auth_mode",
"tornado/test/httpclient_test.py::RequestProxyTest::test_bad_attribute",
"tornado/test/httpclient_test.py::RequestProxyTest::test_both_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_default_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_defaults_none",
"tornado/test/httpclient_test.py::RequestProxyTest::test_neither_set",
"tornado/test/httpclient_test.py::RequestProxyTest::test_request_set",
"tornado/test/httpclient_test.py::HTTPResponseTestCase::test_str",
"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client",
"tornado/test/httpclient_test.py::SyncHTTPClientTest::test_sync_client_error",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_body_setter",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_headers_setter",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_if_modified_since",
"tornado/test/httpclient_test.py::HTTPRequestTestCase::test_null_headers_setter",
"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_copy",
"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_error_with_response",
"tornado/test/httpclient_test.py::HTTPErrorTestCase::test_plain_error"
] | [] | Apache License 2.0 | 2,559 | 1,059 | [
"tornado/curl_httpclient.py",
"tornado/httputil.py",
"tornado/simple_httpclient.py"
] |
catmaid__catpy-25 | ab4f858dda1144bec732738f406054248af7103d | 2018-05-21 18:56:37 | ab4f858dda1144bec732738f406054248af7103d | diff --git a/catpy/__init__.py b/catpy/__init__.py
index feba40e..895073e 100644
--- a/catpy/__init__.py
+++ b/catpy/__init__.py
@@ -10,3 +10,5 @@ __all__ = ['client']
from catpy.client import CatmaidClient, CoordinateTransformer, CatmaidUrl # noqa
+from catpy import image # noqa
+from catpy import export # noqa
diff --git a/catpy/image.py b/catpy/image.py
index a601889..cd25378 100644
--- a/catpy/image.py
+++ b/catpy/image.py
@@ -5,6 +5,8 @@ from __future__ import division, unicode_literals
import logging
from io import BytesIO
from collections import OrderedDict
+
+from requests import HTTPError
from timeit import timeit
import itertools
from warnings import warn
@@ -392,6 +394,12 @@ class Stack(object):
class ProjectStack(Stack):
+ orientation_choices = {
+ 0: "xy",
+ 1: "xz",
+ 2: "zy",
+ }
+
def __init__(self, dimension, translation, resolution, orientation, broken_slices=None, canary_location=None):
"""
Representation of an image stack as it pertains to a CATMAID project
@@ -430,7 +438,8 @@ class ProjectStack(Stack):
"""
stack = cls(
stack_info['dimension'], stack_info['translation'], stack_info['resolution'],
- stack_info['orientation'], stack_info['broken_slices'], stack_info['canary_location']
+ cls.orientation_choices[stack_info['orientation']], stack_info['broken_slices'],
+ stack_info['canary_location']
)
mirrors = [StackMirror.from_dict(d) for d in stack_info['mirrors']]
@@ -670,6 +679,12 @@ class ImageFetcher(object):
raise ValueError('Unknown dimension of volume: should be 2D or 3D')
return np.moveaxis(arr, (0, 1, 2), self._dimension_mappings)
+ def _make_empty_tile(self, width, height=None):
+ height = height or width
+ tile = np.empty((height, width), dtype=np.uint8)
+ tile.fill(self.cval)
+ return tile
+
def _get_tile(self, tile_index):
"""
Get the tile from the cache, handle broken slices, or fetch.
@@ -689,9 +704,7 @@ class ImageFetcher(object):
if tile_index.depth in self.stack.broken_slices:
if self.broken_slice_handling == BrokenSliceHandling.FILL and self.cval is not None:
- tile = np.empty((tile_index.width, tile_index.height))
- tile.fill(self.cval)
- return tile
+ return self._make_empty_tile(tile_index.width, tile_index.height)
else:
raise NotImplementedError(
"'fill' with a non-None cval is the only implemented broken slice handling mode"
@@ -813,7 +826,14 @@ class ImageFetcher(object):
Future of np.ndarray in source orientation
"""
url = self.mirror.generate_url(tile_index)
- return response_to_array(self._session.get(url, timeout=self.timeout))
+ try:
+ return response_to_array(self._session.get(url, timeout=self.timeout))
+ except HTTPError as e:
+ if e.response.status_code == 404:
+ logger.warning("Tile not found at %s (error 404), returning blank tile", url)
+ return self._make_empty_tile(tile_index.width, tile_index.height)
+ else:
+ raise
def _reorient_roi_tgt_to_src(self, roi_tgt):
return roi_tgt[:, self._dimension_mappings]
| Instantiating ImageFetcher from CatMaid fails due to integer orientation | catmaid/catpy | diff --git a/tests/test_image.py b/tests/test_image.py
index 7c02cf8..b73fa41 100644
--- a/tests/test_image.py
+++ b/tests/test_image.py
@@ -6,6 +6,7 @@ import requests
from PIL import Image
from io import BytesIO
from concurrent.futures import Future
+from requests import HTTPError
try:
import mock
@@ -508,7 +509,7 @@ def test_imagefetcher_set_mirror_title_warns_no_match(min_fetcher):
def test_imagefetcher_set_mirror_title_warns_too_many(min_fetcher):
min_fetcher.stack.mirrors.append(StackMirror(IMAGE_BASE, 1, 1, TILE_SOURCE_TYPE, 'png', 'title0', 10))
- with pytest.warns(UserWarning, match='does not exist'):
+ with pytest.warns(UserWarning, match='ore than one'):
min_fetcher.mirror = 'title0'
assert min_fetcher._mirror == min_fetcher.stack.mirrors[0]
@@ -746,3 +747,17 @@ def test_imagefetcher_get_wrappers(min_fetcher, space):
min_fetcher.get = mock.Mock()
getattr(min_fetcher, 'get_{}_space'.format(space.value))('roi', 'zoom_level')
min_fetcher.get.assert_called_with('roi', space, 'zoom_level', None)
+
+
+def test_404_handled_correctly(min_fetcher):
+ idx = TileIndex(0, 0, 0, 0, 100, 100)
+ min_fetcher._session.get = mock.Mock(side_effect=HTTPError(response=mock.Mock(status_code=404)))
+ with mock.patch('catpy.image.response_to_array', mock.Mock()):
+ tile = min_fetcher._fetch(idx)
+ assert tile.shape == (100, 100)
+ assert (tile == 0).sum() == tile.size
+
+
[email protected](reason="404 handling not implemented for threaded fetcher")
+def test_404_handled_correctly_threaded(min_fetcher):
+ assert False
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements/prod.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
-e git+https://github.com/catmaid/catpy.git@ab4f858dda1144bec732738f406054248af7103d#egg=catpy
certifi==2021.5.30
coverage==6.2
decorator==5.1.1
execnet==1.9.0
importlib-metadata==4.8.3
iniconfig==1.1.1
networkx==1.11
numpy==1.12.1
packaging==21.3
Pillow==5.0.0
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
requests==2.14.2
requests-futures==0.9.7
six==1.10.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: catpy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- coverage==6.2
- decorator==5.1.1
- execnet==1.9.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- networkx==1.11
- numpy==1.12.1
- packaging==21.3
- pillow==5.0.0
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- requests==2.14.2
- requests-futures==0.9.7
- six==1.10.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/catpy
| [
"tests/test_image.py::test_404_handled_correctly"
] | [] | [
"tests/test_image.py::test_vol_maker[0-shape0-1]",
"tests/test_image.py::test_vol_maker[1-shape1-2]",
"tests/test_image.py::test_vol_maker[0-shape2-10]",
"tests/test_image.py::test_vol_maker[0-shape3-46]",
"tests/test_image.py::test_vol_maker[1-shape4-47]",
"tests/test_image.py::test_response_to_array_png[L]",
"tests/test_image.py::test_response_to_array_png[RGB]",
"tests/test_image.py::test_response_to_array_png[RGBA]",
"tests/test_image.py::test_response_to_array_jpeg[L]",
"tests/test_image.py::test_response_to_array_jpeg[RGB]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS-{image_base}{{depth}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.DIR_BASED-{image_base}{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.RENDER_SERVICE-{image_base}largeDataTileSource/{tile_width}/{tile_height}/{{zoom_level}}/{{depth}}/{{row}}/{{col}}.{file_extension}]",
"tests/test_image.py::test_predefined_format_urls_are_valid[TileSourceType.FLIXSERVER-{image_base}{{depth}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}]",
"tests/test_image.py::test_as_future_for_not_future",
"tests/test_image.py::test_as_future_for_future",
"tests/test_image.py::test_fill_tiled_cuboid",
"tests/test_image.py::test_fill_tiled_cuboid_raises",
"tests/test_image.py::test_dict_subtract_mismatched_keys",
"tests/test_image.py::test_dict_subtract",
"tests/test_image.py::test_tile_index_coords",
"tests/test_image.py::test_tile_index_comparable[zoom_level]",
"tests/test_image.py::test_tile_index_comparable[height]",
"tests/test_image.py::test_tile_index_comparable[width]",
"tests/test_image.py::test_tile_index_url_kwargs",
"tests/test_image.py::test_stackmirror_corrects_image_base",
"tests/test_image.py::test_stackmirror_corrects_file_extension",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FILE_BASED_WITH_ZOOM_DIRS]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.DIR_BASED]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.RENDER_SERVICE]",
"tests/test_image.py::test_stackmirror_formats_url[TileSourceType.FLIXSERVER]",
"tests/test_image.py::test_stackmirror_raises_on_incompatible_tile_index",
"tests/test_image.py::test_stackmirror_get_tile_index",
"tests/test_image.py::test_stack_sets_broken_slices_canary",
"tests/test_image.py::test_stack_fastest_mirror_calls_get",
"tests/test_image.py::test_stack_fastest_mirror_raises",
"tests/test_image.py::test_tilecache_can_set",
"tests/test_image.py::test_tilecache_set_refreshes_old",
"tests/test_image.py::test_tilecache_can_get",
"tests/test_image.py::test_tilecache_lru",
"tests/test_image.py::test_tilecache_can_clear",
"tests/test_image.py::test_tilecache_can_constrain_len",
"tests/test_image.py::test_tilecache_can_constrain_bytes",
"tests/test_image.py::test_imagefetcher_can_instantiate",
"tests/test_image.py::test_imagefetcher_mirror_fallback_warning",
"tests/test_image.py::test_imagefetcher_set_mirror_none",
"tests/test_image.py::test_imagefetcher_set_mirror_mirror",
"tests/test_image.py::test_imagefetcher_set_mirror_mirror_raises",
"tests/test_image.py::test_imagefetcher_set_mirror_int",
"tests/test_image.py::test_imagefetcher_set_mirror_int_as_str",
"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_no_match",
"tests/test_image.py::test_imagefetcher_set_mirror_position_warns_too_many",
"tests/test_image.py::test_imagefetcher_set_mirror_title",
"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_no_match",
"tests/test_image.py::test_imagefetcher_set_mirror_title_warns_too_many",
"tests/test_image.py::test_imagefetcher_get_auth_default",
"tests/test_image.py::test_imagefetcher_get_auth_from_mirror",
"tests/test_image.py::test_imagefetcher_get_auth_fallback",
"tests/test_image.py::test_imagefetcher_clear_cache",
"tests/test_image.py::test_imagefetcher_map_dimensions",
"tests/test_image.py::test_imagefetcher_reorient",
"tests/test_image.py::test_imagefetcher_reorient_expands",
"tests/test_image.py::test_imagefetcher_reorient_throws",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi0-expected_drc0-expected_yx_minmax0]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi1-expected_drc1-expected_yx_minmax1]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi2-expected_drc2-expected_yx_minmax2]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi3-expected_drc3-expected_yx_minmax3]",
"tests/test_image.py::test_imagefetcher_roi_to_tiles[roi4-expected_drc4-expected_yx_minmax4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-scaled-0-expected0]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-0-expected1]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack--2-expected2]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-stack-1-expected3]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-0-expected4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project--2-expected5]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ImageFetcher-project-1-expected6]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-scaled-0-expected0]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-0-expected1]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack--2-expected2]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-stack-1-expected3]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-0-expected4]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project--2-expected5]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled[ThreadedImageFetcher-project-1-expected6]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_roi_to_scaled_raises[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi0-1]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi1-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi2-1]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi3-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi4-2]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi5-4]",
"tests/test_image.py::test_imagefetcher_get[ImageFetcher-roi6-12]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi0-1]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi1-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi2-1]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi3-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi4-2]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi5-4]",
"tests/test_image.py::test_imagefetcher_get[ThreadedImageFetcher-roi6-12]",
"tests/test_image.py::test_imagefetcher_get_into_array[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_into_array[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_cache[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_broken_slice[ThreadedImageFetcher]",
"tests/test_image.py::test_imagefetcher_get_tile_from_fetch",
"tests/test_image.py::test_imagefetcher_fetch",
"tests/test_image.py::test_imagefetcher_get_wrappers[stack]",
"tests/test_image.py::test_imagefetcher_get_wrappers[scaled]",
"tests/test_image.py::test_imagefetcher_get_wrappers[project]"
] | [] | MIT License | 2,565 | 871 | [
"catpy/__init__.py",
"catpy/image.py"
] |
|
tomMoral__loky-128 | 7e46ee602a23251f476312357c00fd13f77f9938 | 2018-05-23 10:17:18 | 1bf741a4796d15c517902a3331b5bd9e86502037 | ogrisel: > For the future, should we do the same as in joblib and run the test of joblib as part of the CI?
I agree but let's do that in another PR. | diff --git a/loky/backend/semaphore_tracker.py b/loky/backend/semaphore_tracker.py
index 79587f2..f494237 100644
--- a/loky/backend/semaphore_tracker.py
+++ b/loky/backend/semaphore_tracker.py
@@ -203,7 +203,6 @@ def main(fd):
try:
sem_unlink(name)
if VERBOSE: # pragma: no cover
- name = name.decode('ascii')
sys.stderr.write("[SemaphoreTracker] unlink {}\n"
.format(name))
sys.stderr.flush()
diff --git a/loky/backend/semlock.py b/loky/backend/semlock.py
index c94c4cd..2d35f6a 100644
--- a/loky/backend/semlock.py
+++ b/loky/backend/semlock.py
@@ -68,7 +68,7 @@ if sys.version_info[:2] < (3, 3):
def sem_unlink(name):
- if pthread.sem_unlink(name) < 0:
+ if pthread.sem_unlink(name.encode('ascii')) < 0:
raiseFromErrno()
@@ -153,8 +153,8 @@ class SemLock(object):
self.ident = 0
self.kind = kind
self.maxvalue = maxvalue
- self.name = name.encode('ascii')
- self.handle = _sem_open(self.name, value)
+ self.name = name
+ self.handle = _sem_open(self.name.encode('ascii'), value)
def __del__(self):
try:
@@ -265,7 +265,7 @@ class SemLock(object):
self.kind = kind
self.maxvalue = maxvalue
self.name = name
- self.handle = _sem_open(name)
+ self.handle = _sem_open(name.encode('ascii'))
return self
diff --git a/loky/backend/synchronize.py b/loky/backend/synchronize.py
index 2cdb43d..4773b9d 100644
--- a/loky/backend/synchronize.py
+++ b/loky/backend/synchronize.py
@@ -121,8 +121,7 @@ class SemLock(object):
@staticmethod
def _make_name():
# OSX does not support long names for semaphores
- name = '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
- return name
+ return '/loky-%i-%s' % (os.getpid(), next(SemLock._rand))
#
| loky.backend.semaphore_tracker.sem_unlink does not have same signature if coming from ctypes or _multiprocessing
* `_multi_processing.sem_unlink` takes `str`
* `loky.backend.semlock.sem_unlink` comes from `ctypes` and take `bytes`.
It feels like some code was written with the ctypes variant in mind and raise an error when the `_multiprocessing.sem_unlink` is called. Tests seem to be only testing `loky.backend.semlock.sem_unlink`.
#### Context
This is an error I just saw in a joblib Travis [build](https://travis-ci.org/joblib/joblib/jobs/346847911#L4044). Note this is with loky version 1.2.1.
```
E /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:195: UserWarning: semaphore_tracker: There appear to be 6 leaked semaphores to clean up at shutdown
E len(cache))
E /home/travis/build/joblib/joblib/joblib/externals/loky/backend/semaphore_tracker.py:211: UserWarning: semaphore_tracker: b'/loky-5456-6haleho6': TypeError('argument 1 must be str, not bytes',)
E warnings.warn('semaphore_tracker: %r: %r' % (name, e))
```
Quickly looking at it, it seems like this is still in master. The code where the warning happens is here:
https://github.com/tomMoral/loky/blob/dec1c8144b12938dfe7bfc511009e12f25fd1cd9/loky/backend/semaphore_tracker.py#L203-L211
| tomMoral/loky | diff --git a/tests/test_synchronize.py b/tests/test_synchronize.py
index 797070d..4794f17 100644
--- a/tests/test_synchronize.py
+++ b/tests/test_synchronize.py
@@ -22,7 +22,7 @@ if sys.version_info < (3, 3):
@pytest.mark.skipif(sys.platform == "win32", reason="UNIX test")
def test_semlock_failure():
from loky.backend.semlock import SemLock, sem_unlink
- name = "test1"
+ name = "loky-test-semlock"
sl = SemLock(0, 1, 1, name=name)
with pytest.raises(FileExistsError):
@@ -30,7 +30,7 @@ def test_semlock_failure():
sem_unlink(sl.name)
with pytest.raises(FileNotFoundError):
- SemLock._rebuild(None, 0, 0, name.encode('ascii'))
+ SemLock._rebuild(None, 0, 0, name)
def assert_sem_value_equal(sem, value):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 2
},
"num_modified_files": 3
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"psutil",
"pytest-timeout",
"coverage"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
cloudpickle==2.2.1
coverage==6.2
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/tomMoral/loky.git@7e46ee602a23251f476312357c00fd13f77f9938#egg=loky
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
psutil==7.0.0
py @ file:///opt/conda/conda-bld/py_1644396412707/work
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-timeout==2.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: loky
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- cloudpickle==2.2.1
- coverage==6.2
- psutil==7.0.0
- pytest-timeout==2.1.0
prefix: /opt/conda/envs/loky
| [
"tests/test_synchronize.py::test_semlock_failure"
] | [] | [
"tests/test_synchronize.py::TestLock::test_lock",
"tests/test_synchronize.py::TestLock::test_rlock",
"tests/test_synchronize.py::TestLock::test_lock_context",
"tests/test_synchronize.py::TestSemaphore::test_semaphore",
"tests/test_synchronize.py::TestSemaphore::test_bounded_semaphore",
"tests/test_synchronize.py::TestSemaphore::test_timeout",
"tests/test_synchronize.py::TestCondition::test_notify",
"tests/test_synchronize.py::TestCondition::test_notify_all",
"tests/test_synchronize.py::TestCondition::test_timeout",
"tests/test_synchronize.py::TestCondition::test_waitfor",
"tests/test_synchronize.py::TestCondition::test_wait_result",
"tests/test_synchronize.py::TestEvent::test_event"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,575 | 603 | [
"loky/backend/semaphore_tracker.py",
"loky/backend/semlock.py",
"loky/backend/synchronize.py"
] |
google__mobly-453 | f1aff6a7f06887424759e3c192b1bf6e13d2a6bf | 2018-05-24 19:50:41 | 95286a01a566e056d44acfa9577a45bc7f37f51d | xpconanfan: I don't see how this is related to logging stderr as the issue described.
One of the msg is incorrect?
---
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved.
---
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIjCGd2zhkKzp2ablI:b-x8c38) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
> ```Python
> break
> finally:
> (unhandled_out, err) = proc.communicate()
> ```
wait, so this does happen?
shouldn't we call the handler with this out instead?
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453#-:-LDIjnBO4MUgZxYNImBt:blud5im)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 2 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIkOBz8aQLOE0ovB8O:brjczjz) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
wait, so this does happen?
shouldn't we call the handler with this out instead?
</blockquote></details>
I'm not sure?
I'm adding logging here first to try to determine what the underlying problem is
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 1 unresolved discussion.
---
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIl2971UfZ3LqypsHv:b332s67) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
<details><summary><i>Previously, winterfroststrom wrote…</i></summary><blockquote>
I'm not sure?
I'm adding logging here first to try to determine what the underlying problem is
</blockquote></details>
seems like we should pipe all stdout content through the handler as this function promised?
you could add additional logging to signify the existence of stdout from `communicate`?
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453)*
<!-- Sent from Reviewable.io -->
xpconanfan:
Review status: 0 of 2 files reviewed at latest revision, 1 unresolved discussion.
---
*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDImOOR-GL1gRhihxxm:ba86vyn) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*
> ```Python
> def test_execute_and_process_stdout_logs_cmd(self, mock_debug_logger,
> mock_popen):
> self._mock_execute_and_process_stdout_process(mock_popen)
> ```
this test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.
Can we more explicitly set the mock value within the test?
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453)*
<!-- Sent from Reviewable.io -->
winterfroststrom:
Review status: 0 of 2 files reviewed at latest revision, 2 unresolved discussions.
---
*[mobly/controllers/android_device_lib/adb.py, line 215 at r1](https://reviewable.io/reviews/google/mobly/453#-LDIjCGd2zhkKzp2ablH:-LDIsvuPD4jS9CVrpnr5:bcy1d3j) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/mobly/controllers/android_device_lib/adb.py#L215)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
seems like we should pipe all stdout content through the handler as this function promised?
you could add additional logging to signify the existence of stdout from `communicate`?
</blockquote></details>
So, I've never seen this output actually get populated and I'm not sure it is in the case I'm debugging, but okay.
I'm preferring changing the logged command because otherwise you'd get semi-duplicate log lines.
---
*[tests/mobly/controllers/android_device_lib/adb_test.py, line 156 at r1](https://reviewable.io/reviews/google/mobly/453#-LDImOOR-GL1gRhihxxl:-LDItG5yEIdICTRzP1c0:b-896fix) ([raw file](https://github.com/google/mobly/blob/73f94e45966ec8566eabae03fc00893e5a13ee33/tests/mobly/controllers/android_device_lib/adb_test.py#L156)):*
<details><summary><i>Previously, xpconanfan (Ang Li) wrote…</i></summary><blockquote>
this test is relying on the default mock stdout value in `_mock_execute_and_process_stdout_process`, which is difficult to read.
Can we more explicitly set the mock value within the test?
</blockquote></details>
Done.
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453)*
<!-- Sent from Reviewable.io -->
xpconanfan: <img class="emoji" title=":lgtm:" alt=":lgtm:" align="absmiddle" src="https://reviewable.io/lgtm.png" height="20" width="61"/>
---
Review status: 0 of 2 files reviewed at latest revision, all discussions resolved.
---
*Comments from [Reviewable](https://reviewable.io/reviews/google/mobly/453#-:-LEBweyyBV-cQJiSKgM7:bnfp4nl)*
<!-- Sent from Reviewable.io -->
| diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py
index 90dcd0b..95d1261 100644
--- a/mobly/controllers/android_device_lib/adb.py
+++ b/mobly/controllers/android_device_lib/adb.py
@@ -203,6 +203,7 @@ class AdbProxy(object):
stderr=subprocess.PIPE,
shell=shell,
bufsize=1)
+ out = '[elided, processed via handler]'
try:
while proc.poll() is None:
line = proc.stdout.readline()
@@ -211,16 +212,19 @@ class AdbProxy(object):
else:
break
finally:
- (_, err) = proc.communicate()
+ (unexpected_out, err) = proc.communicate()
+ if unexpected_out:
+ out = '[unexpected stdout] %s' % unexpected_out
+ for line in unexpected_out.splitlines():
+ handler(line)
+
ret = proc.returncode
+ logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',
+ cli_cmd_to_string(args), out, err, ret)
if ret == 0:
return err
else:
- raise AdbError(
- cmd=args,
- stdout='[elided, processed via handler]',
- stderr=err,
- ret_code=ret)
+ raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)
def _construct_adb_cmd(self, raw_name, args, shell):
"""Constructs an adb command with arguments for a subprocess call.
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py
index e3e835d..03674ff 100644
--- a/mobly/controllers/android_device_lib/snippet_client.py
+++ b/mobly/controllers/android_device_lib/snippet_client.py
@@ -125,8 +125,7 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
# Yaaay! We're done!
self.log.debug('Snippet %s started after %.1fs on host port %s',
- self.package,
- time.time() - start_time, self.host_port)
+ self.package, time.time() - start_time, self.host_port)
def restore_app_connection(self, port=None):
"""Restores the app after device got reconnected.
@@ -151,12 +150,13 @@ class SnippetClient(jsonrpc_client_base.JsonRpcClientBase):
try:
self.connect()
except:
- # Failed to connect to app, something went wrong.
+ # Log the original error and raise AppRestoreConnectionError.
+ self.log.exception('Failed to re-connect to app.')
raise jsonrpc_client_base.AppRestoreConnectionError(
- self._ad(
- 'Failed to restore app connection for %s at host port %s, '
- 'device port %s'), self.package, self.host_port,
- self.device_port)
+ self._ad,
+ ('Failed to restore app connection for %s at host port %s, '
+ 'device port %s') % (self.package, self.host_port,
+ self.device_port))
# Because the previous connection was lost, update self._proc
self._proc = None
| `_execute_and_process_stdout` should log cmd | google/mobly | diff --git a/tests/mobly/controllers/android_device_lib/adb_test.py b/tests/mobly/controllers/android_device_lib/adb_test.py
index 1c75a9d..8dec8aa 100755
--- a/tests/mobly/controllers/android_device_lib/adb_test.py
+++ b/tests/mobly/controllers/android_device_lib/adb_test.py
@@ -76,8 +76,7 @@ class AdbTest(unittest.TestCase):
mock_popen.return_value.stdout.readline.side_effect = ['']
mock_proc.communicate = mock.Mock(
- return_value=(MOCK_DEFAULT_STDOUT.encode('utf-8'),
- MOCK_DEFAULT_STDERR.encode('utf-8')))
+ return_value=('', MOCK_DEFAULT_STDERR.encode('utf-8')))
mock_proc.returncode = 0
return mock_popen
@@ -150,6 +149,57 @@ class AdbTest(unittest.TestCase):
mock_handler.assert_any_call('1')
mock_handler.assert_any_call('2')
+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
+ def test_execute_and_process_stdout_reads_unexpected_stdout(
+ self, mock_popen):
+ unexpected_stdout = MOCK_DEFAULT_STDOUT.encode('utf-8')
+
+ self._mock_execute_and_process_stdout_process(mock_popen)
+ mock_handler = mock.MagicMock()
+ mock_popen.return_value.communicate = mock.Mock(
+ return_value=(unexpected_stdout, MOCK_DEFAULT_STDERR.encode(
+ 'utf-8')))
+
+ err = adb.AdbProxy()._execute_and_process_stdout(
+ ['fake_cmd'], shell=False, handler=mock_handler)
+ self.assertEqual(mock_handler.call_count, 1)
+ mock_handler.assert_called_with(unexpected_stdout)
+
+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
+ @mock.patch('logging.debug')
+ def test_execute_and_process_stdout_logs_cmd(self, mock_debug_logger,
+ mock_popen):
+ raw_expected_stdout = ''
+ expected_stdout = '[elided, processed via handler]'
+ expected_stderr = MOCK_DEFAULT_STDERR.encode('utf-8')
+ self._mock_execute_and_process_stdout_process(mock_popen)
+ mock_popen.return_value.communicate = mock.Mock(
+ return_value=(raw_expected_stdout, expected_stderr))
+
+ err = adb.AdbProxy()._execute_and_process_stdout(
+ ['fake_cmd'], shell=False, handler=mock.MagicMock())
+ mock_debug_logger.assert_called_with(
+ 'cmd: %s, stdout: %s, stderr: %s, ret: %s', 'fake_cmd',
+ expected_stdout, expected_stderr, 0)
+
+ @mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
+ @mock.patch('logging.debug')
+ def test_execute_and_process_stdout_logs_cmd_with_unexpected_stdout(
+ self, mock_debug_logger, mock_popen):
+ raw_expected_stdout = MOCK_DEFAULT_STDOUT.encode('utf-8')
+ expected_stdout = '[unexpected stdout] %s' % raw_expected_stdout
+ expected_stderr = MOCK_DEFAULT_STDERR.encode('utf-8')
+
+ self._mock_execute_and_process_stdout_process(mock_popen)
+ mock_popen.return_value.communicate = mock.Mock(
+ return_value=(raw_expected_stdout, expected_stderr))
+
+ err = adb.AdbProxy()._execute_and_process_stdout(
+ ['fake_cmd'], shell=False, handler=mock.MagicMock())
+ mock_debug_logger.assert_called_with(
+ 'cmd: %s, stdout: %s, stderr: %s, ret: %s', 'fake_cmd',
+ expected_stdout, expected_stderr, 0)
+
@mock.patch('mobly.controllers.android_device_lib.adb.subprocess.Popen')
def test_execute_and_process_stdout_when_cmd_exits(self, mock_popen):
self._mock_execute_and_process_stdout_process(mock_popen)
diff --git a/tests/mobly/controllers/android_device_lib/snippet_client_test.py b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
index 2c875d8..d964ae3 100755
--- a/tests/mobly/controllers/android_device_lib/snippet_client_test.py
+++ b/tests/mobly/controllers/android_device_lib/snippet_client_test.py
@@ -166,6 +166,15 @@ class SnippetClientTest(jsonrpc_client_test_base.JsonRpcClientTestBase):
self.assertEqual(789, callback._event_client.host_port)
self.assertEqual(456, callback._event_client.device_port)
+ # if unable to reconnect for any reason, a
+ # jsonrpc_client_base.AppRestoreConnectionError is raised.
+ mock_create_connection.side_effect = IOError('socket timed out')
+ with self.assertRaisesRegex(
+ jsonrpc_client_base.AppRestoreConnectionError,
+ ('Failed to restore app connection for %s at host port %s, '
+ 'device port %s') % (MOCK_PACKAGE_NAME, 789, 456)):
+ client.restore_app_connection()
+
@mock.patch('socket.create_connection')
@mock.patch('mobly.controllers.android_device_lib.snippet_client.'
'utils.start_standing_subprocess')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 2
} | 1.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"pytest-cov",
"pytest-xdist",
"pytest-mock",
"pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
execnet==2.1.1
future==1.0.0
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
-e git+https://github.com/google/mobly.git@f1aff6a7f06887424759e3c192b1bf6e13d2a6bf#egg=mobly
mock==1.0.1
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
portpicker==1.6.0
psutil==7.0.0
pyserial==3.5
pytest @ file:///croot/pytest_1738938843180/work
pytest-asyncio==0.26.0
pytest-cov==6.0.0
pytest-mock==3.14.0
pytest-xdist==3.6.1
pytz==2025.2
PyYAML==6.0.2
timeout-decorator==0.5.0
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
typing_extensions==4.13.0
| name: mobly
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- execnet==2.1.1
- future==1.0.0
- mock==1.0.1
- portpicker==1.6.0
- psutil==7.0.0
- pyserial==3.5
- pytest-asyncio==0.26.0
- pytest-cov==6.0.0
- pytest-mock==3.14.0
- pytest-xdist==3.6.1
- pytz==2025.2
- pyyaml==6.0.2
- timeout-decorator==0.5.0
- typing-extensions==4.13.0
prefix: /opt/conda/envs/mobly
| [
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_logs_cmd_with_unexpected_stdout",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_unexpected_stdout",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_restore_event_client"
] | [] | [
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_cli_cmd_to_string",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_arg_command_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_one_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_serial_with_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_auto_quotes",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_arg_command_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_one_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_shell_true_with_serial_with_list",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_construct_adb_cmd_with_special_characters",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_formats_command_with_shell_true",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_shell_true",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_adb_cmd_with_stderr_pipe",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_error_no_timeout",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_no_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_timed_out",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_negative_timeout_value",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_exec_cmd_with_timeout_success",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_adb_and_process_stdout_formats_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_raises_adb_error",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_reads_stdout",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_returns_stderr",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_eof",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_cmd_exits",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_execute_and_process_stdout_when_handler_crash",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_forward",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_called_correctly",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_existing_command",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_newer_devices",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_has_shell_command_with_missing_command_on_older_devices",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_options",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_handler_with_runner",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_options",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_with_runner",
"tests/mobly/controllers/android_device_lib/adb_test.py::AdbTest::test_instrument_without_parameters",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_app_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_not_instrumented",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_fail_target_not_installed",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_check_app_installed_normal",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_header_junk",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_no_valid_line",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_persistent_session",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_and_connect_unknown_protocol",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_app_crash",
"tests/mobly/controllers/android_device_lib/snippet_client_test.py::SnippetClientTest::test_snippet_start_event_client"
] | [] | Apache License 2.0 | 2,581 | 778 | [
"mobly/controllers/android_device_lib/adb.py",
"mobly/controllers/android_device_lib/snippet_client.py"
] |
nipy__nipype-2597 | 9eaa2a32c8cb3569633a79d6f7968270453f9aed | 2018-05-25 13:56:19 | 704b97dee7848283692bac38f04541c5af2a87b5 | diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py
index 08d357ff6..0a59aac26 100644
--- a/nipype/pipeline/engine/utils.py
+++ b/nipype/pipeline/engine/utils.py
@@ -1054,12 +1054,14 @@ def generate_expanded_graph(graph_in):
for src_id in list(old_edge_dict.keys()):
# Drop the original JoinNodes; only concerned with
# generated Nodes
- if hasattr(node, 'joinfield'):
+ if hasattr(node, 'joinfield') and node.itername == src_id:
continue
# Patterns:
# - src_id : Non-iterable node
- # - src_id.[a-z]\d+ : IdentityInterface w/ iterables
- # - src_id.[a-z]I.[a-z]\d+ : Non-IdentityInterface w/ iterables
+ # - src_id.[a-z]\d+ :
+ # IdentityInterface w/ iterables or nested JoinNode
+ # - src_id.[a-z]I.[a-z]\d+ :
+ # Non-IdentityInterface w/ iterables
# - src_idJ\d+ : JoinNode(IdentityInterface)
if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$',
node.itername):
| PR #2479 has broken my package
### Summary
PR #2479 has broken my package (https://pypi.org/project/arcana/)
I am not quite sure what the rationale behind the changes is, so it is difficult to know how to debug or whether there is something I can change in my package.
### Actual behavior
Workflow exits with error
```
File "/Users/tclose/git/ni/arcana/test/mwe/nipype_pr2479/test.py", line 71, in <module>
study.data('out')
File "/Users/tclose/git/ni/arcana/arcana/study/base.py", line 325, in data
visit_ids=visit_ids)
File "/Users/tclose/git/ni/arcana/arcana/runner/base.py", line 37, in run
return workflow.run(plugin=self._plugin)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/workflows.py", line 595, in run
runner.run(execgraph, updatehash=updatehash, config=self.config)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/plugins/linear.py", line 44, in run
node.run(updatehash=updatehash)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 480, in run
result = self._run_interface(execute=True)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 564, in _run_interface
return self._run_command(execute)
File "/Users/tclose/git/ni/arcana/arcana/node.py", line 59, in _run_command
result = self.nipype_cls._run_command(self, *args, **kwargs)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 888, in _run_command
self._collate_join_field_inputs()
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 898, in _collate_join_field_inputs
val = self._collate_input_value(field)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 928, in _collate_input_value
for idx in range(self._next_slot_index)
File "/Users/tclose/git/ni/nipype/nipype/pipeline/engine/nodes.py", line 947, in _slot_value
field, index, e))
AttributeError: The join node pipeline1.pipeline1_subject_session_outputs does not have a slot field subject_session_pairsJ1 to hold the subject_session_pairs value at index 0: 'DynamicTraitedSpec' object has no attribute 'subject_session_pairsJ1'
```
### Expected behavior
The workflow runs without error
### How to replicate the behavior
See script below
### Script/Workflow details
I have tried to come up with an MWE that doesn't use my package, but it was proving difficult. However, you can now install my package with pip
`pip install arcana`
and run the following
```
import os.path
import shutil
from nipype import config
config.enable_debug_mode()
import nipype # @IgnorePep8
from nipype.interfaces.utility import IdentityInterface # @IgnorePep8
from arcana.dataset import DatasetMatch, DatasetSpec # @IgnorePep8
from arcana.data_format import text_format # @IgnorePep8
from arcana.study.base import Study, StudyMetaClass # @IgnorePep8
from arcana.archive.local import LocalArchive # @IgnorePep8
from arcana.runner import LinearRunner # @IgnorePep8
BASE_ARCHIVE_DIR = os.path.join(os.path.dirname(__file__), 'archives')
BASE_WORK_DIR = os.path.join(os.path.dirname(__file__), 'work')
print(nipype.get_info())
print(nipype.__version__)
class TestStudy(Study):
__metaclass__ = StudyMetaClass
add_data_specs = [
DatasetSpec('in', text_format),
DatasetSpec('out', text_format, 'pipeline')]
def pipeline(self, **kwargs):
pipeline = self.create_pipeline(
name='pipeline1',
inputs=[DatasetSpec('in', text_format)],
outputs=[DatasetSpec('out', text_format)],
desc="A dummy pipeline used to test 'run_pipeline' method",
version=1,
citations=[],
**kwargs)
ident = pipeline.create_node(IdentityInterface(['a']),
name="ident")
# Connect inputs
pipeline.connect_input('in', ident, 'a')
# Connect outputs
pipeline.connect_output('out', ident, 'a')
return pipeline
# Create archives
shutil.rmtree(BASE_ARCHIVE_DIR, ignore_errors=True)
shutil.rmtree(BASE_WORK_DIR, ignore_errors=True)
os.makedirs(BASE_ARCHIVE_DIR)
for sess in (['ARCHIVE1', 'SUBJECT', 'VISIT'],
['ARCHIVE2', 'SUBJECT1', 'VISIT1'],
['ARCHIVE2', 'SUBJECT1', 'VISIT2'],
['ARCHIVE2', 'SUBJECT2', 'VISIT1'],
['ARCHIVE2', 'SUBJECT2', 'VISIT2']):
sess_dir = os.path.join(*([BASE_ARCHIVE_DIR] + sess))
os.makedirs(sess_dir)
with open(os.path.join(sess_dir, 'in.txt'), 'w') as f:
f.write('in')
archive1_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE1')
archive2_path = os.path.join(BASE_ARCHIVE_DIR, 'ARCHIVE2')
work1_path = os.path.join(BASE_WORK_DIR, 'WORK1')
work2_path = os.path.join(BASE_WORK_DIR, 'WORK2')
# Attempt to run with archive with 2 subjects and 2 visits
study = TestStudy('two',
LocalArchive(archive2_path),
LinearRunner(work2_path),
inputs=[DatasetMatch('in', text_format, 'in')])
# Fails here
study2.data('out')
print("Ran study 2")
#
study1 = TestStudy('one',
LocalArchive(archive1_path),
LinearRunner(work1_path),
inputs=[DatasetMatch('in', text_format, 'in')])
study1.data('out')
print("Ran study 1")
```
to reproduce the error
### Platform details:
{'nibabel_version': '2.2.1', 'sys_executable': '/usr/local/opt/python@2/bin/python2.7', 'networkx_version': '1.9', 'numpy_version': '1.14.3', 'sys_platform': 'darwin', 'sys_version': '2.7.15 (default, May 1 2018, 16:44:08) \n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]', 'commit_source': 'repository', 'commit_hash': '5a96ea54a', 'pkg_path': '/Users/tclose/git/ni/nipype/nipype', 'nipype_version': '1.0.4-dev+g5a96ea54a', 'traits_version': '4.6.0', 'scipy_version': '1.1.0'}
1.0.4-dev+g5a96ea54a
(problem arose in 1.0.1)
### Execution environment
My Homebrew python 2 environment outside container
| nipy/nipype | diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py
index 54ff15048..77fc0f2fd 100644
--- a/nipype/pipeline/engine/tests/test_join.py
+++ b/nipype/pipeline/engine/tests/test_join.py
@@ -627,3 +627,35 @@ def test_name_prefix_join(tmpdir):
joinfield=['in1'])
wf.connect(square, 'out', square_join, "in1")
wf.run()
+
+
+def test_join_nestediters(tmpdir):
+ tmpdir.chdir()
+
+ def exponent(x, p):
+ return x ** p
+
+ wf = pe.Workflow('wf', base_dir=tmpdir.strpath)
+
+ xs = pe.Node(IdentityInterface(['x']),
+ iterables=[('x', [1, 2])],
+ name='xs')
+ ps = pe.Node(IdentityInterface(['p']),
+ iterables=[('p', [3, 4])],
+ name='ps')
+ exp = pe.Node(Function(function=exponent), name='exp')
+ exp_joinx = pe.JoinNode(Merge(1, ravel_inputs=True),
+ name='exp_joinx',
+ joinsource='xs',
+ joinfield=['in1'])
+ exp_joinp = pe.JoinNode(Merge(1, ravel_inputs=True),
+ name='exp_joinp',
+ joinsource='ps',
+ joinfield=['in1'])
+ wf.connect([
+ (xs, exp, [('x', 'x')]),
+ (ps, exp, [('p', 'p')]),
+ (exp, exp_joinx, [('out', 'in1')]),
+ (exp_joinx, exp_joinp, [('out', 'in1')])])
+
+ wf.run()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 1.0 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
attrs==22.2.0
Babel==2.11.0
certifi==2021.5.30
charset-normalizer==2.0.12
click==8.0.4
codecov==2.1.13
configparser==5.2.0
coverage==6.2
cycler==0.11.0
decorator==4.4.2
docutils==0.18.1
execnet==1.9.0
funcsigs==1.0.2
future==1.0.0
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
iniconfig==1.1.1
isodate==0.6.1
Jinja2==3.0.3
kiwisolver==1.3.1
lxml==5.3.1
MarkupSafe==2.0.1
matplotlib==3.3.4
mock==5.2.0
networkx==2.5.1
nibabel==3.2.2
-e git+https://github.com/nipy/nipype.git@9eaa2a32c8cb3569633a79d6f7968270453f9aed#egg=nipype
numpy==1.19.5
numpydoc==1.1.0
packaging==21.3
Pillow==8.4.0
pluggy==1.0.0
prov==1.5.0
py==1.11.0
pydot==1.4.2
pydotplus==2.0.2
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
pytest-env==0.6.2
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pytz==2025.2
rdflib==5.0.0
requests==2.27.1
scipy==1.5.4
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
tomli==1.2.3
traits==6.4.1
typing_extensions==4.1.1
urllib3==1.26.20
yapf==0.32.0
zipp==3.6.0
| name: nipype
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- attrs==22.2.0
- babel==2.11.0
- charset-normalizer==2.0.12
- click==8.0.4
- codecov==2.1.13
- configparser==5.2.0
- coverage==6.2
- cycler==0.11.0
- decorator==4.4.2
- docutils==0.18.1
- execnet==1.9.0
- funcsigs==1.0.2
- future==1.0.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isodate==0.6.1
- jinja2==3.0.3
- kiwisolver==1.3.1
- lxml==5.3.1
- markupsafe==2.0.1
- matplotlib==3.3.4
- mock==5.2.0
- networkx==2.5.1
- nibabel==3.2.2
- numpy==1.19.5
- numpydoc==1.1.0
- packaging==21.3
- pillow==8.4.0
- pluggy==1.0.0
- prov==1.5.0
- py==1.11.0
- pydot==1.4.2
- pydotplus==2.0.2
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pytest-env==0.6.2
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pytz==2025.2
- rdflib==5.0.0
- requests==2.27.1
- scipy==1.5.4
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- tomli==1.2.3
- traits==6.4.1
- typing-extensions==4.1.1
- urllib3==1.26.20
- yapf==0.32.0
- zipp==3.6.0
prefix: /opt/conda/envs/nipype
| [
"nipype/pipeline/engine/tests/test_join.py::test_join_nestediters"
] | [] | [
"nipype/pipeline/engine/tests/test_join.py::test_node_joinsource",
"nipype/pipeline/engine/tests/test_join.py::test_set_join_node_file_input",
"nipype/pipeline/engine/tests/test_join.py::test_nested_workflow_join",
"nipype/pipeline/engine/tests/test_join.py::test_name_prefix_join",
"nipype/pipeline/engine/tests/test_join.py::test_identity_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_multiple_join_nodes",
"nipype/pipeline/engine/tests/test_join.py::test_unique_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_join_expansion",
"nipype/pipeline/engine/tests/test_join.py::test_set_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_multifield_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_synchronize_join_node",
"nipype/pipeline/engine/tests/test_join.py::test_itersource_join_source_node",
"nipype/pipeline/engine/tests/test_join.py::test_itersource_two_join_nodes"
] | [] | Apache License 2.0 | 2,586 | 332 | [
"nipype/pipeline/engine/utils.py"
] |
|
theolind__pymysensors-154 | f373e86e5423c8a92bb5adeb7b03ae7b64850e04 | 2018-05-26 12:59:31 | f373e86e5423c8a92bb5adeb7b03ae7b64850e04 | diff --git a/mysensors/__init__.py b/mysensors/__init__.py
index f57486b..784f988 100644
--- a/mysensors/__init__.py
+++ b/mysensors/__init__.py
@@ -49,7 +49,7 @@ class Gateway(object):
self.metric = True # if true - use metric, if false - use imperial
if persistence:
self.persistence = Persistence(
- self.sensors, persistence_file, persistence_scheduler)
+ self.sensors, persistence_scheduler, persistence_file)
else:
self.persistence = None
self.protocol_version = safe_is_version(protocol_version)
@@ -351,7 +351,8 @@ class ThreadingGateway(Gateway):
def __init__(self, *args, **kwargs):
"""Set up gateway instance."""
- super().__init__(*args, **kwargs)
+ super().__init__(
+ *args, persistence_scheduler=self._create_scheduler, **kwargs)
self.lock = threading.Lock()
self._stop_event = threading.Event()
self._cancel_save = None
@@ -373,12 +374,22 @@ class ThreadingGateway(Gateway):
continue
time.sleep(0.02)
+ def _create_scheduler(self, save_sensors):
+ """Return function to schedule saving sensors."""
+ def schedule_save():
+ """Save sensors and schedule a new save."""
+ save_sensors()
+ scheduler = threading.Timer(10.0, schedule_save)
+ scheduler.start()
+ self._cancel_save = scheduler.cancel
+ return schedule_save
+
def start_persistence(self):
"""Load persistence file and schedule saving of persistence file."""
if not self.persistence:
return
self.persistence.safe_load_sensors()
- self._cancel_save = self.persistence.schedule_save_sensors()
+ self.persistence.schedule_save_sensors()
def stop(self):
"""Stop the background thread."""
@@ -494,7 +505,7 @@ class BaseAsyncGateway(BaseTransportGateway):
"""Return function to schedule saving sensors."""
@asyncio.coroutine
def schedule_save():
- """Return a function to cancel the schedule."""
+ """Save sensors and schedule a new save."""
yield from self.loop.run_in_executor(None, save_sensors)
callback = partial(
ensure_future, schedule_save(), loop=self.loop)
diff --git a/mysensors/persistence.py b/mysensors/persistence.py
index 5dd4b57..efb2e6c 100644
--- a/mysensors/persistence.py
+++ b/mysensors/persistence.py
@@ -3,35 +3,21 @@ import json
import logging
import os
import pickle
-import threading
from .sensor import ChildSensor, Sensor
_LOGGER = logging.getLogger(__name__)
-def create_scheduler(save_sensors):
- """Return function to schedule saving sensors."""
- def schedule_save():
- """Return a function to cancel the schedule."""
- save_sensors()
- scheduler = threading.Timer(10.0, schedule_save)
- scheduler.start()
- return scheduler.cancel
- return schedule_save
-
-
class Persistence(object):
"""Organize persistence file saving and loading."""
def __init__(
- self, sensors, persistence_file='mysensors.pickle',
- schedule_factory=None):
+ self, sensors, schedule_factory,
+ persistence_file='mysensors.pickle'):
"""Set up Persistence instance."""
self.persistence_file = persistence_file
self.persistence_bak = '{}.bak'.format(self.persistence_file)
- if schedule_factory is None:
- schedule_factory = create_scheduler
self.schedule_save_sensors = schedule_factory(self.save_sensors)
self._sensors = sensors
self.need_save = True
| Main program does not exit cleanly
[branch master - version 0.14.0 - Using serial gateway - NO asyncio]
After calling the SerialGateway.stop() method the program does not return to console but seems to be looping in a still alive thread ( probably the persistence thread).
************************************************************************************
```py
MYSGW_Serial_Port = '/dev/ttyMSGW'
....
GATEWAY = mysensors.SerialGateway(
MYSGW_Serial_Port, event_callback=event, persistence=True,
persistence_file='./mysensors.json', protocol_version='2.0', baud=115200,
timeout=1.0, reconnect_timeout=10.0)
GATEWAY.start_persistence()
GATEWAY.start()
....
....
GATEWAY.stop() #-> main thread does not go past this point
exit(0)
```
************************************************************************************* | theolind/pymysensors | diff --git a/tests/test_gateway_mqtt.py b/tests/test_gateway_mqtt.py
index cba4270..60700fa 100644
--- a/tests/test_gateway_mqtt.py
+++ b/tests/test_gateway_mqtt.py
@@ -143,11 +143,10 @@ def test_subscribe_error(gateway, add_sensor, mock_sub, caplog):
def test_start_stop_gateway(
mock_save, mock_load, gateway, add_sensor, mock_pub, mock_sub):
"""Test start and stop of MQTT gateway."""
- gateway.persistence = Persistence(gateway.sensors)
- mock_cancel_save = mock.MagicMock()
+ mock_schedule_factory = mock.MagicMock()
mock_schedule_save = mock.MagicMock()
- mock_schedule_save.return_value = mock_cancel_save
- gateway.persistence.schedule_save_sensors = mock_schedule_save
+ mock_schedule_factory.return_value = mock_schedule_save
+ gateway.persistence = Persistence(gateway.sensors, mock_schedule_factory)
sensor = add_sensor(1)
sensor.add_child_sensor(1, gateway.const.Presentation.S_HUM)
sensor.children[1].values[gateway.const.SetReq.V_HUM] = '20'
@@ -173,7 +172,6 @@ def test_start_stop_gateway(
assert mock_pub.call_count == 2
assert mock_pub.mock_calls == calls
gateway.stop()
- assert mock_cancel_save.call_count == 1
assert mock_save.call_count == 1
@@ -185,7 +183,7 @@ def test_mqtt_load_persistence(gateway, add_sensor, mock_sub, tmpdir):
persistence_file = tmpdir.join('file.json')
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
gateway.persistence.save_sensors()
del gateway.sensors[1]
assert 1 not in gateway.sensors
diff --git a/tests/test_mysensors.py b/tests/test_mysensors.py
index 403ce9d..3a6c9c8 100644
--- a/tests/test_mysensors.py
+++ b/tests/test_mysensors.py
@@ -613,6 +613,28 @@ def test_gateway_low_protocol():
assert gateway.protocol_version == '1.4'
[email protected]('mysensors.persistence.Persistence.save_sensors')
[email protected]('mysensors.threading.Timer')
+def test_threading_persistence(mock_timer_class, mock_save_sensors):
+ """Test schedule persistence on threading gateway."""
+ mock_timer_1 = mock.MagicMock()
+ mock_timer_2 = mock.MagicMock()
+ mock_timer_class.side_effect = [mock_timer_1, mock_timer_2]
+ gateway = ThreadingGateway(persistence=True)
+ gateway.persistence.schedule_save_sensors()
+ assert mock_save_sensors.call_count == 1
+ assert mock_timer_class.call_count == 1
+ assert mock_timer_1.start.call_count == 1
+ gateway.persistence.schedule_save_sensors()
+ assert mock_save_sensors.call_count == 2
+ assert mock_timer_class.call_count == 2
+ assert mock_timer_1.start.call_count == 1
+ assert mock_timer_2.start.call_count == 1
+ gateway.stop()
+ assert mock_timer_2.cancel.call_count == 1
+ assert mock_save_sensors.call_count == 3
+
+
def test_update_fw():
"""Test calling fw_update with bad path."""
gateway = ThreadingGateway()
diff --git a/tests/test_persistence.py b/tests/test_persistence.py
index fdf5464..c5d8896 100644
--- a/tests/test_persistence.py
+++ b/tests/test_persistence.py
@@ -45,7 +45,7 @@ def test_persistence(gateway, add_sensor, filename, tmpdir):
persistence_file = tmpdir.join(filename)
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
gateway.persistence.save_sensors()
del gateway.sensors[1]
assert 1 not in gateway.sensors
@@ -75,7 +75,7 @@ def test_bad_file_name(gateway, add_sensor, tmpdir):
add_sensor(1)
persistence_file = tmpdir.join('file.bad')
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
with pytest.raises(Exception):
gateway.persistence.save_sensors()
@@ -85,7 +85,7 @@ def test_json_no_files(gateway, tmpdir):
assert not gateway.sensors
persistence_file = tmpdir.join('file.json')
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
gateway.persistence.safe_load_sensors()
assert not gateway.sensors
@@ -97,7 +97,7 @@ def test_empty_files(gateway, filename, tmpdir):
assert not gateway.sensors
persistence_file = tmpdir.join(filename)
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
persistence = gateway.persistence
persistence_file.write('')
with open(persistence.persistence_bak, 'w') as file_handle:
@@ -112,7 +112,8 @@ def test_json_empty_file_good_bak(gateway, add_sensor, tmpdir):
assert 1 in gateway.sensors
persistence_file = tmpdir.join('file.json')
orig_file_name = persistence_file.strpath
- gateway.persistence = Persistence(gateway.sensors, orig_file_name)
+ gateway.persistence = Persistence(
+ gateway.sensors, mock.MagicMock(), orig_file_name)
gateway.persistence.save_sensors()
del gateway.sensors[1]
assert 1 not in gateway.sensors
@@ -160,7 +161,7 @@ def test_persistence_upgrade(
assert 'description' not in sensor.children[0].__dict__
persistence_file = tmpdir.join(filename)
gateway.persistence = Persistence(
- gateway.sensors, persistence_file.strpath)
+ gateway.sensors, mock.MagicMock(), persistence_file.strpath)
gateway.persistence.save_sensors()
del gateway.sensors[1]
assert 1 not in gateway.sensors
@@ -175,16 +176,21 @@ def test_persistence_upgrade(
assert gateway.sensors[1].children[0].type == sensor.children[0].type
[email protected]('mysensors.persistence.threading.Timer')
@mock.patch('mysensors.persistence.Persistence.save_sensors')
-def test_schedule_save_sensors(mock_save, mock_timer_class, gateway):
+def test_schedule_save_sensors(mock_save, gateway):
"""Test schedule save sensors."""
- mock_timer = mock.MagicMock()
- mock_timer_class.return_value = mock_timer
- gateway.persistence = Persistence(gateway.sensors)
+ mock_schedule_save = mock.MagicMock()
+ mock_schedule_factory = mock.MagicMock()
+ mock_schedule_factory.return_value = mock_schedule_save
+
+ gateway.persistence = Persistence(gateway.sensors, mock_schedule_factory)
+
+ assert mock_schedule_factory.call_count == 1
+ assert mock_schedule_factory.call_args == mock.call(mock_save)
+
gateway.persistence.schedule_save_sensors()
- assert mock_save.call_count == 1
- assert mock_timer.start.call_count == 1
+
+ assert mock_schedule_save.call_count == 1
class MySensorsJSONEncoderTestUpgrade(MySensorsJSONEncoder):
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.14 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"flake8",
"pylint",
"pydocstyle"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs==22.2.0
certifi==2021.5.30
crcmod==1.7
dill==0.3.4
flake8==5.0.4
get-mac==0.9.2
importlib-metadata==4.2.0
iniconfig==1.1.1
intelhex==2.3.0
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
py==1.11.0
pycodestyle==2.9.1
pydocstyle==6.3.0
pyflakes==2.5.0
pylint==2.13.9
-e git+https://github.com/theolind/pymysensors.git@f373e86e5423c8a92bb5adeb7b03ae7b64850e04#egg=pymysensors
pyparsing==3.1.4
pyserial==3.5
pyserial-asyncio==0.6
pytest==7.0.1
snowballstemmer==2.2.0
tomli==1.2.3
typed-ast==1.5.5
typing_extensions==4.1.1
voluptuous==0.11.1
wrapt==1.16.0
zipp==3.6.0
| name: pymysensors
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- attrs==22.2.0
- crcmod==1.7
- dill==0.3.4
- flake8==5.0.4
- get-mac==0.9.2
- importlib-metadata==4.2.0
- iniconfig==1.1.1
- intelhex==2.3.0
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- py==1.11.0
- pycodestyle==2.9.1
- pydocstyle==6.3.0
- pyflakes==2.5.0
- pylint==2.13.9
- pyparsing==3.1.4
- pyserial==3.5
- pyserial-asyncio==0.6
- pytest==7.0.1
- snowballstemmer==2.2.0
- tomli==1.2.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- voluptuous==0.11.1
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/pymysensors
| [
"tests/test_gateway_mqtt.py::test_start_stop_gateway",
"tests/test_gateway_mqtt.py::test_mqtt_load_persistence",
"tests/test_mysensors.py::test_threading_persistence",
"tests/test_persistence.py::test_persistence[file.pickle]",
"tests/test_persistence.py::test_persistence[file.json]",
"tests/test_persistence.py::test_bad_file_name",
"tests/test_persistence.py::test_json_no_files",
"tests/test_persistence.py::test_empty_files[file.pickle]",
"tests/test_persistence.py::test_empty_files[file.json]",
"tests/test_persistence.py::test_json_empty_file_good_bak",
"tests/test_persistence.py::test_persistence_upgrade[file.pickle]",
"tests/test_persistence.py::test_persistence_upgrade[file.json]",
"tests/test_persistence.py::test_schedule_save_sensors"
] | [] | [
"tests/test_gateway_mqtt.py::test_send",
"tests/test_gateway_mqtt.py::test_send_empty_string",
"tests/test_gateway_mqtt.py::test_send_error",
"tests/test_gateway_mqtt.py::test_recv",
"tests/test_gateway_mqtt.py::test_recv_wrong_prefix",
"tests/test_gateway_mqtt.py::test_presentation",
"tests/test_gateway_mqtt.py::test_presentation_no_sensor",
"tests/test_gateway_mqtt.py::test_subscribe_error",
"tests/test_gateway_mqtt.py::test_nested_prefix",
"tests/test_gateway_mqtt.py::test_get_gateway_id",
"tests/test_mysensors.py::test_logic_bad_message[1.4]",
"tests/test_mysensors.py::test_logic_bad_message[1.5]",
"tests/test_mysensors.py::test_logic_bad_message[2.0]",
"tests/test_mysensors.py::test_logic_bad_message[2.1]",
"tests/test_mysensors.py::test_logic_bad_message[2.2]",
"tests/test_mysensors.py::test_non_presented_sensor[1.4-None]",
"tests/test_mysensors.py::test_non_presented_sensor[1.5-None]",
"tests/test_mysensors.py::test_non_presented_sensor[2.0-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_non_presented_sensor[2.1-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_non_presented_sensor[2.2-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_present_to_non_sensor[1.4-None]",
"tests/test_mysensors.py::test_present_to_non_sensor[1.5-None]",
"tests/test_mysensors.py::test_present_to_non_sensor[2.0-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_present_to_non_sensor[2.1-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_present_to_non_sensor[2.2-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_internal_id_request[1.4]",
"tests/test_mysensors.py::test_internal_id_request[1.5]",
"tests/test_mysensors.py::test_internal_id_request[2.0]",
"tests/test_mysensors.py::test_internal_id_request[2.1]",
"tests/test_mysensors.py::test_internal_id_request[2.2]",
"tests/test_mysensors.py::test_id_request_with_node_zero[1.4]",
"tests/test_mysensors.py::test_id_request_with_node_zero[1.5]",
"tests/test_mysensors.py::test_id_request_with_node_zero[2.0]",
"tests/test_mysensors.py::test_id_request_with_node_zero[2.1]",
"tests/test_mysensors.py::test_id_request_with_node_zero[2.2]",
"tests/test_mysensors.py::test_presentation_arduino_node[1.4]",
"tests/test_mysensors.py::test_presentation_arduino_node[1.5]",
"tests/test_mysensors.py::test_presentation_arduino_node[2.0]",
"tests/test_mysensors.py::test_presentation_arduino_node[2.1]",
"tests/test_mysensors.py::test_presentation_arduino_node[2.2]",
"tests/test_mysensors.py::test_id_request_presentation[1.4]",
"tests/test_mysensors.py::test_id_request_presentation[1.5]",
"tests/test_mysensors.py::test_id_request_presentation[2.0]",
"tests/test_mysensors.py::test_id_request_presentation[2.1]",
"tests/test_mysensors.py::test_id_request_presentation[2.2]",
"tests/test_mysensors.py::test_internal_config[1.4]",
"tests/test_mysensors.py::test_internal_config[1.5]",
"tests/test_mysensors.py::test_internal_config[2.0]",
"tests/test_mysensors.py::test_internal_config[2.1]",
"tests/test_mysensors.py::test_internal_config[2.2]",
"tests/test_mysensors.py::test_internal_time[1.4]",
"tests/test_mysensors.py::test_internal_time[1.5]",
"tests/test_mysensors.py::test_internal_time[2.0]",
"tests/test_mysensors.py::test_internal_time[2.1]",
"tests/test_mysensors.py::test_internal_time[2.2]",
"tests/test_mysensors.py::test_internal_sketch_name[1.4]",
"tests/test_mysensors.py::test_internal_sketch_name[1.5]",
"tests/test_mysensors.py::test_internal_sketch_name[2.0]",
"tests/test_mysensors.py::test_internal_sketch_name[2.1]",
"tests/test_mysensors.py::test_internal_sketch_name[2.2]",
"tests/test_mysensors.py::test_internal_sketch_version[1.4]",
"tests/test_mysensors.py::test_internal_sketch_version[1.5]",
"tests/test_mysensors.py::test_internal_sketch_version[2.0]",
"tests/test_mysensors.py::test_internal_sketch_version[2.1]",
"tests/test_mysensors.py::test_internal_sketch_version[2.2]",
"tests/test_mysensors.py::test_internal_log_message[1.4]",
"tests/test_mysensors.py::test_internal_log_message[1.5]",
"tests/test_mysensors.py::test_internal_log_message[2.0]",
"tests/test_mysensors.py::test_internal_log_message[2.1]",
"tests/test_mysensors.py::test_internal_log_message[2.2]",
"tests/test_mysensors.py::test_internal_gateway_ready[1.4-None]",
"tests/test_mysensors.py::test_internal_gateway_ready[1.5-None]",
"tests/test_mysensors.py::test_internal_gateway_ready[2.0-255;255;3;0;20;\\n]",
"tests/test_mysensors.py::test_internal_gateway_ready[2.1-255;255;3;0;20;\\n]",
"tests/test_mysensors.py::test_internal_gateway_ready[2.2-255;255;3;0;20;\\n]",
"tests/test_mysensors.py::test_present_light_level_sensor[1.4]",
"tests/test_mysensors.py::test_present_light_level_sensor[1.5]",
"tests/test_mysensors.py::test_present_light_level_sensor[2.0]",
"tests/test_mysensors.py::test_present_light_level_sensor[2.1]",
"tests/test_mysensors.py::test_present_light_level_sensor[2.2]",
"tests/test_mysensors.py::test_present_humidity_sensor[1.4]",
"tests/test_mysensors.py::test_present_humidity_sensor[1.5]",
"tests/test_mysensors.py::test_present_humidity_sensor[2.0]",
"tests/test_mysensors.py::test_present_humidity_sensor[2.1]",
"tests/test_mysensors.py::test_present_humidity_sensor[2.2]",
"tests/test_mysensors.py::test_present_same_child[1.4]",
"tests/test_mysensors.py::test_present_same_child[1.5]",
"tests/test_mysensors.py::test_present_same_child[2.0]",
"tests/test_mysensors.py::test_present_same_child[2.1]",
"tests/test_mysensors.py::test_present_same_child[2.2]",
"tests/test_mysensors.py::test_set_light_level[1.4]",
"tests/test_mysensors.py::test_set_light_level[1.5]",
"tests/test_mysensors.py::test_set_light_level[2.0]",
"tests/test_mysensors.py::test_set_light_level[2.1]",
"tests/test_mysensors.py::test_set_light_level[2.2]",
"tests/test_mysensors.py::test_set_humidity_level[1.4]",
"tests/test_mysensors.py::test_set_humidity_level[1.5]",
"tests/test_mysensors.py::test_set_humidity_level[2.0]",
"tests/test_mysensors.py::test_set_humidity_level[2.1]",
"tests/test_mysensors.py::test_set_humidity_level[2.2]",
"tests/test_mysensors.py::test_battery_level[1.4]",
"tests/test_mysensors.py::test_battery_level[1.5]",
"tests/test_mysensors.py::test_battery_level[2.0]",
"tests/test_mysensors.py::test_battery_level[2.1]",
"tests/test_mysensors.py::test_battery_level[2.2]",
"tests/test_mysensors.py::test_bad_battery_level[1.4]",
"tests/test_mysensors.py::test_bad_battery_level[1.5]",
"tests/test_mysensors.py::test_bad_battery_level[2.0]",
"tests/test_mysensors.py::test_bad_battery_level[2.1]",
"tests/test_mysensors.py::test_bad_battery_level[2.2]",
"tests/test_mysensors.py::test_req[1.4]",
"tests/test_mysensors.py::test_req[1.5]",
"tests/test_mysensors.py::test_req[2.0]",
"tests/test_mysensors.py::test_req[2.1]",
"tests/test_mysensors.py::test_req[2.2]",
"tests/test_mysensors.py::test_req_zerovalue[1.4]",
"tests/test_mysensors.py::test_req_zerovalue[1.5]",
"tests/test_mysensors.py::test_req_zerovalue[2.0]",
"tests/test_mysensors.py::test_req_zerovalue[2.1]",
"tests/test_mysensors.py::test_req_zerovalue[2.2]",
"tests/test_mysensors.py::test_req_novalue[1.4]",
"tests/test_mysensors.py::test_req_novalue[1.5]",
"tests/test_mysensors.py::test_req_novalue[2.0]",
"tests/test_mysensors.py::test_req_novalue[2.1]",
"tests/test_mysensors.py::test_req_novalue[2.2]",
"tests/test_mysensors.py::test_req_notasensor[1.4]",
"tests/test_mysensors.py::test_req_notasensor[1.5]",
"tests/test_mysensors.py::test_req_notasensor[2.0]",
"tests/test_mysensors.py::test_req_notasensor[2.1]",
"tests/test_mysensors.py::test_req_notasensor[2.2]",
"tests/test_mysensors.py::test_callback[1.4]",
"tests/test_mysensors.py::test_callback[1.5]",
"tests/test_mysensors.py::test_callback[2.0]",
"tests/test_mysensors.py::test_callback[2.1]",
"tests/test_mysensors.py::test_callback[2.2]",
"tests/test_mysensors.py::test_callback_exception[1.4]",
"tests/test_mysensors.py::test_callback_exception[1.5]",
"tests/test_mysensors.py::test_callback_exception[2.0]",
"tests/test_mysensors.py::test_callback_exception[2.1]",
"tests/test_mysensors.py::test_callback_exception[2.2]",
"tests/test_mysensors.py::test_set_and_reboot[1.4]",
"tests/test_mysensors.py::test_set_and_reboot[1.5]",
"tests/test_mysensors.py::test_set_and_reboot[2.0]",
"tests/test_mysensors.py::test_set_and_reboot[2.1]",
"tests/test_mysensors.py::test_set_and_reboot[2.2]",
"tests/test_mysensors.py::test_set_child_value[1.4]",
"tests/test_mysensors.py::test_set_child_value[1.5]",
"tests/test_mysensors.py::test_set_child_value[2.0]",
"tests/test_mysensors.py::test_set_child_value[2.1]",
"tests/test_mysensors.py::test_set_child_value[2.2]",
"tests/test_mysensors.py::test_set_child_value_no_sensor[1.4-None]",
"tests/test_mysensors.py::test_set_child_value_no_sensor[1.5-None]",
"tests/test_mysensors.py::test_set_child_value_no_sensor[2.0-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_set_child_value_no_sensor[2.1-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_set_child_value_no_sensor[2.2-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_non_presented_child[1.4-None]",
"tests/test_mysensors.py::test_non_presented_child[1.5-None]",
"tests/test_mysensors.py::test_non_presented_child[2.0-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_non_presented_child[2.1-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_non_presented_child[2.2-1;255;3;0;19;\\n]",
"tests/test_mysensors.py::test_set_child_no_children[1.4]",
"tests/test_mysensors.py::test_set_child_no_children[1.5]",
"tests/test_mysensors.py::test_set_child_no_children[2.0]",
"tests/test_mysensors.py::test_set_child_no_children[2.1]",
"tests/test_mysensors.py::test_set_child_no_children[2.2]",
"tests/test_mysensors.py::test_set_child_value_bad_type[1.4]",
"tests/test_mysensors.py::test_set_child_value_bad_type[1.5]",
"tests/test_mysensors.py::test_set_child_value_bad_type[2.0]",
"tests/test_mysensors.py::test_set_child_value_bad_type[2.1]",
"tests/test_mysensors.py::test_set_child_value_bad_type[2.2]",
"tests/test_mysensors.py::test_set_child_value_bad_ack[1.4]",
"tests/test_mysensors.py::test_set_child_value_bad_ack[1.5]",
"tests/test_mysensors.py::test_set_child_value_bad_ack[2.0]",
"tests/test_mysensors.py::test_set_child_value_bad_ack[2.1]",
"tests/test_mysensors.py::test_set_child_value_bad_ack[2.2]",
"tests/test_mysensors.py::test_set_child_value_value_type[1.4]",
"tests/test_mysensors.py::test_set_child_value_value_type[1.5]",
"tests/test_mysensors.py::test_set_child_value_value_type[2.0]",
"tests/test_mysensors.py::test_set_child_value_value_type[2.1]",
"tests/test_mysensors.py::test_set_child_value_value_type[2.2]",
"tests/test_mysensors.py::test_child_validate[1.4]",
"tests/test_mysensors.py::test_child_validate[1.5]",
"tests/test_mysensors.py::test_child_validate[2.0]",
"tests/test_mysensors.py::test_child_validate[2.1]",
"tests/test_mysensors.py::test_child_validate[2.2]",
"tests/test_mysensors.py::test_set_forecast[1.4]",
"tests/test_mysensors.py::test_set_forecast[1.5]",
"tests/test_mysensors.py::test_set_forecast[2.0]",
"tests/test_mysensors.py::test_set_forecast[2.1]",
"tests/test_mysensors.py::test_set_forecast[2.2]",
"tests/test_mysensors.py::test_set_bad_battery_attribute[1.4]",
"tests/test_mysensors.py::test_set_bad_battery_attribute[1.5]",
"tests/test_mysensors.py::test_set_bad_battery_attribute[2.0]",
"tests/test_mysensors.py::test_set_bad_battery_attribute[2.1]",
"tests/test_mysensors.py::test_set_bad_battery_attribute[2.2]",
"tests/test_mysensors.py::test_set_rgb[1.5]",
"tests/test_mysensors.py::test_set_rgb[2.0]",
"tests/test_mysensors.py::test_set_rgb[2.1]",
"tests/test_mysensors.py::test_set_rgb[2.2]",
"tests/test_mysensors.py::test_set_rgbw[1.5]",
"tests/test_mysensors.py::test_set_rgbw[2.0]",
"tests/test_mysensors.py::test_set_rgbw[2.1]",
"tests/test_mysensors.py::test_set_rgbw[2.2]",
"tests/test_mysensors.py::test_smartsleep[2.0-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_smartsleep[2.1-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_smartsleep[2.2-1;255;3;0;32;500\\n]",
"tests/test_mysensors.py::test_smartsleep_from_unknown[2.0-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_smartsleep_from_unknown[2.1-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_smartsleep_from_unknown[2.2-1;255;3;0;32;500\\n]",
"tests/test_mysensors.py::test_set_with_new_state[2.0-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_set_with_new_state[2.1-1;255;3;0;22;\\n]",
"tests/test_mysensors.py::test_set_with_new_state[2.2-1;255;3;0;32;500\\n]",
"tests/test_mysensors.py::test_discover_response_unknown[2.0]",
"tests/test_mysensors.py::test_discover_response_unknown[2.1]",
"tests/test_mysensors.py::test_discover_response_unknown[2.2]",
"tests/test_mysensors.py::test_discover_response_known[2.0]",
"tests/test_mysensors.py::test_discover_response_known[2.1]",
"tests/test_mysensors.py::test_discover_response_known[2.2]",
"tests/test_mysensors.py::test_set_position[2.0]",
"tests/test_mysensors.py::test_set_position[2.1]",
"tests/test_mysensors.py::test_set_position[2.2]",
"tests/test_mysensors.py::test_gateway_bad_protocol",
"tests/test_mysensors.py::test_gateway_low_protocol",
"tests/test_mysensors.py::test_update_fw",
"tests/test_mysensors.py::test_update_fw_bad_path"
] | [] | MIT License | 2,590 | 847 | [
"mysensors/__init__.py",
"mysensors/persistence.py"
] |
|
acorg__dark-matter-576 | 66f246ba9417430e3f00e94ca0abc88de59a92d4 | 2018-05-27 14:07:28 | 66f246ba9417430e3f00e94ca0abc88de59a92d4 | diff --git a/dark/__init__.py b/dark/__init__.py
index 0246a07..6a59296 100644
--- a/dark/__init__.py
+++ b/dark/__init__.py
@@ -7,4 +7,4 @@ if sys.version_info < (2, 7):
# will not be found by the version() function in ../setup.py
#
# Remember to update ../CHANGELOG.md describing what's new in each version.
-__version__ = '3.0.5'
+__version__ = '3.0.6'
diff --git a/dark/filter.py b/dark/filter.py
index 0665ffc..b0daa76 100644
--- a/dark/filter.py
+++ b/dark/filter.py
@@ -279,6 +279,23 @@ def addFASTAFilteringCommandLineOptions(parser):
help=('A file of (1-based) sequence numbers to retain. Numbers must '
'be one per line.'))
+ parser.add_argument(
+ '--idLambda', metavar='LAMBDA-FUNCTION',
+ help=('A one-argument function taking and returning a read id. '
+ 'E.g., --idLambda "lambda id: id.split(\'_\')[0]" or '
+ '--idLambda "lambda id: id[:10]". If the function returns None, '
+ 'the read will be filtered out.'))
+
+ parser.add_argument(
+ '--readLambda', metavar='LAMBDA-FUNCTION',
+ help=('A one-argument function taking and returning a read. '
+ 'E.g., --readLambda "lambda r: Read(r.id.split(\'_\')[0], '
+ 'r.sequence.strip(\'-\')". Make sure to also modify the quality '
+ 'string if you change the length of a FASTQ sequence. If the '
+ 'function returns None, the read will be filtered out. The '
+ 'function will be passed to eval with the dark.reads classes '
+ 'Read, DNARead, AARead, etc. all in scope.'))
+
# A mutually exclusive group for --keepSites, --keepSitesFile,
# --removeSites, and --removeSitesFile.
group = parser.add_mutually_exclusive_group()
@@ -381,4 +398,5 @@ def parseFASTAFilteringCommandLineOptions(args, reads):
randomSubset=args.randomSubset, trueLength=args.trueLength,
sampleFraction=args.sampleFraction,
sequenceNumbersFile=args.sequenceNumbersFile,
+ idLambda=args.idLambda, readLambda=args.readLambda,
keepSites=keepSites, removeSites=removeSites)
diff --git a/dark/reads.py b/dark/reads.py
index 42390e4..1074f78 100644
--- a/dark/reads.py
+++ b/dark/reads.py
@@ -740,8 +740,9 @@ class ReadFilter(object):
sequence identity.
@param removeDuplicatesById: If C{True} remove duplicated reads based
only on read id.
- @param removeDescriptions: If C{True} remove the description part of read
- ids (i.e., the part following the first whitespace).
+ @param removeDescriptions: If C{True} remove the description (the part
+ following the first whitespace) from read ids. The description is
+ removed after applying the function specified by --idLambda (if any).
@param modifier: If not C{None}, a function that is passed a read
and which either returns a read or C{None}. If it returns a read,
that read is passed through the filter. If it returns C{None},
@@ -791,6 +792,14 @@ class ReadFilter(object):
file containing (1-based) sequence numbers, in ascending order,
one per line. Only those sequences matching the given numbers will
be kept.
+ @param idLambda: If not C{None}, a C{str} Python lambda function
+ specification to use to modify read ids. The function is applied
+ before removing the description (if --removeDescriptions is also
+ specified).
+ @param readLambda: If not C{None}, a C{str} Python lambda function
+ specification to use to modify reads. The function will be passed,
+ and must return, a single Read (or one of its subclasses). This
+ function is called after the --idLambda function, if any.
@param keepSites: A set of C{int} 0-based sites (i.e., indices) in
sequences that should be kept. If C{None} (the default), all sites are
kept.
@@ -819,7 +828,8 @@ class ReadFilter(object):
removeDuplicates=False, removeDuplicatesById=False,
removeDescriptions=False, modifier=None, randomSubset=None,
trueLength=None, sampleFraction=None,
- sequenceNumbersFile=None, keepSites=None, removeSites=None):
+ sequenceNumbersFile=None, idLambda=None, readLambda=None,
+ keepSites=None, removeSites=None):
if randomSubset is not None:
if sampleFraction is not None:
@@ -929,6 +939,9 @@ class ReadFilter(object):
sampleFraction = None
self.sampleFraction = sampleFraction
+ self.idLambda = eval(idLambda) if idLambda else None
+ self.readLambda = eval(readLambda) if readLambda else None
+
def filter(self, read):
"""
Check if a read passes the filter.
@@ -1038,6 +1051,20 @@ class ReadFilter(object):
elif self.removeSites is not None:
read = read.newFromSites(self.removeSites, exclude=True)
+ if self.idLambda:
+ newId = self.idLambda(read.id)
+ if newId is None:
+ return False
+ else:
+ read.id = newId
+
+ if self.readLambda:
+ newRead = self.readLambda(read)
+ if newRead is None:
+ return False
+ else:
+ read = newRead
+
if self.removeDescriptions:
read.id = read.id.split()[0]
| Add ability to give an anonymous Python function for read id conversion when filtering FASTA | acorg/dark-matter | diff --git a/test/test_reads.py b/test/test_reads.py
index 4e51442..5d9cd3e 100644
--- a/test/test_reads.py
+++ b/test/test_reads.py
@@ -3126,6 +3126,52 @@ class TestReadsFiltering(TestCase):
six.assertRaisesRegex(self, ValueError, error, Reads().filter,
keepSites={4}, removeSites={5})
+ def testIdLambda(self):
+ """
+ A passed idLambda function should produce the expected read ids.
+ """
+ read = Read('id1', 'ATCGCC')
+ reads = Reads(initialReads=[read])
+ result = reads.filter(idLambda='lambda id: "x-" + id.upper()')
+ self.assertEqual('x-ID1', list(result)[0].id)
+
+ def testIdLambdaReturningNone(self):
+ """
+ A passed idLambda function should produce the expected read ids,
+ including when it returns None.
+ """
+ read1 = Read('id1', 'ATCGCC')
+ read2 = Read('id2', 'GGATCG')
+ reads = Reads(initialReads=[read1, read2])
+ result = reads.filter(
+ idLambda='lambda id: "aa" if id.find("1") > -1 else None')
+ (result,) = list(result)
+ self.assertEqual('aa', result.id)
+
+ def testReadLambda(self):
+ """
+ A passed readLambda function should produce the expected reads.
+ """
+ read = Read('id1', 'ATCGCC')
+ reads = Reads(initialReads=[read])
+ result = reads.filter(readLambda='lambda r: Read("hey", "AAA")')
+ (result,) = list(result)
+ self.assertEqual(Read('hey', 'AAA'), result)
+
+ def testReadLambdaReturningNone(self):
+ """
+ A passed readLambda function should produce the expected reads,
+ including when it returns None.
+ """
+ read1 = Read('xid1', 'ATCGCC')
+ read2 = Read('yid2', 'GGATCG')
+ reads = Reads(initialReads=[read1, read2])
+ result = reads.filter(
+ readLambda=('lambda r: Read(r.id + "-x", r.sequence[:2]) '
+ 'if r.id.startswith("x") else None'))
+ (result,) = list(result)
+ self.assertEqual(Read('xid1-x', 'AT'), result)
+
class TestReadsInRAM(TestCase):
"""
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 3
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest pytest-cov pytest-xdist pytest-mock pytest-asyncio"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
backcall==0.2.0
biopython==1.79
bz2file==0.98
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
coverage==6.2
cycler==0.11.0
-e git+https://github.com/acorg/dark-matter.git@66f246ba9417430e3f00e94ca0abc88de59a92d4#egg=dark_matter
decorator==5.1.1
execnet==1.9.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
ipython==7.16.3
ipython-genutils==0.2.0
jedi==0.17.2
kiwisolver==1.3.1
matplotlib==3.3.4
numpy==1.19.5
packaging==21.3
parso==0.7.1
pexpect==4.9.0
pickleshare==0.7.5
Pillow==8.4.0
pluggy==1.0.0
prompt-toolkit==3.0.36
ptyprocess==0.7.0
py==1.11.0
pycparser==2.21
pyfaidx==0.7.1
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-asyncio==0.16.0
pytest-cov==4.0.0
pytest-mock==3.6.1
pytest-xdist==3.0.2
python-dateutil==2.9.0.post0
pyzmq==25.1.2
requests==2.27.1
simplejson==3.20.1
six==1.17.0
tomli==1.2.3
traitlets==4.3.3
typing_extensions==4.1.1
urllib3==1.26.20
wcwidth==0.2.13
zipp==3.6.0
| name: dark-matter
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- backcall==0.2.0
- biopython==1.79
- bz2file==0.98
- cffi==1.15.1
- charset-normalizer==2.0.12
- coverage==6.2
- cycler==0.11.0
- decorator==5.1.1
- execnet==1.9.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- ipython==7.16.3
- ipython-genutils==0.2.0
- jedi==0.17.2
- kiwisolver==1.3.1
- matplotlib==3.3.4
- numpy==1.19.5
- packaging==21.3
- parso==0.7.1
- pexpect==4.9.0
- pickleshare==0.7.5
- pillow==8.4.0
- pluggy==1.0.0
- prompt-toolkit==3.0.36
- ptyprocess==0.7.0
- py==1.11.0
- pycparser==2.21
- pyfaidx==0.7.1
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-asyncio==0.16.0
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytest-xdist==3.0.2
- python-dateutil==2.9.0.post0
- pyzmq==25.1.2
- requests==2.27.1
- simplejson==3.20.1
- six==1.17.0
- tomli==1.2.3
- traitlets==4.3.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- wcwidth==0.2.13
- zipp==3.6.0
prefix: /opt/conda/envs/dark-matter
| [
"test/test_reads.py::TestReadsFiltering::testIdLambda",
"test/test_reads.py::TestReadsFiltering::testIdLambdaReturningNone",
"test/test_reads.py::TestReadsFiltering::testReadLambda",
"test/test_reads.py::TestReadsFiltering::testReadLambdaReturningNone"
] | [
"test/test_reads.py::TestReadsInRAM::testFastaFile"
] | [
"test/test_reads.py::TestRead::testCasePreservation",
"test/test_reads.py::TestRead::testCheckAlphabetAAReadMatchingReturnTrue",
"test/test_reads.py::TestRead::testCheckAlphabetAAReadNotMatchingRaise",
"test/test_reads.py::TestRead::testCheckAlphabetDNAReadMatchingReturnTrue",
"test/test_reads.py::TestRead::testCheckAlphabetDNAReadNotMatchingRaise",
"test/test_reads.py::TestRead::testCheckAlphabetwithReadMustBePermissive",
"test/test_reads.py::TestRead::testEquality",
"test/test_reads.py::TestRead::testEqualityWithDifferingIds",
"test/test_reads.py::TestRead::testEqualityWithDifferingQuality",
"test/test_reads.py::TestRead::testEqualityWithDifferingSequences",
"test/test_reads.py::TestRead::testEqualityWithNoQuality",
"test/test_reads.py::TestRead::testEqualityWithOneOmittedQuality",
"test/test_reads.py::TestRead::testExpectedAttributes",
"test/test_reads.py::TestRead::testFromDict",
"test/test_reads.py::TestRead::testFromDictNoQuality",
"test/test_reads.py::TestRead::testGetitemFullCopy",
"test/test_reads.py::TestRead::testGetitemId",
"test/test_reads.py::TestRead::testGetitemLength",
"test/test_reads.py::TestRead::testGetitemQuality",
"test/test_reads.py::TestRead::testGetitemReturnsNewRead",
"test/test_reads.py::TestRead::testGetitemReversed",
"test/test_reads.py::TestRead::testGetitemSequence",
"test/test_reads.py::TestRead::testGetitemSingleIndex",
"test/test_reads.py::TestRead::testGetitemWithStep",
"test/test_reads.py::TestRead::testHashDiffersIfIdDiffers",
"test/test_reads.py::TestRead::testHashDiffersIfQualityDiffers",
"test/test_reads.py::TestRead::testHashDiffersIfSequenceDiffers",
"test/test_reads.py::TestRead::testHashIdenticalNoQuality",
"test/test_reads.py::TestRead::testHashIdenticalWithQuality",
"test/test_reads.py::TestRead::testHashViaDict",
"test/test_reads.py::TestRead::testHashViaSet",
"test/test_reads.py::TestRead::testKeepSites",
"test/test_reads.py::TestRead::testKeepSitesAllSites",
"test/test_reads.py::TestRead::testKeepSitesNoSites",
"test/test_reads.py::TestRead::testKeepSitesOutOfRange",
"test/test_reads.py::TestRead::testKeepSitesWithQuality",
"test/test_reads.py::TestRead::testLength",
"test/test_reads.py::TestRead::testLowComplexityFraction",
"test/test_reads.py::TestRead::testLowComplexityFractionEmptySequence",
"test/test_reads.py::TestRead::testLowComplexityFractionOne",
"test/test_reads.py::TestRead::testLowComplexityFractionZero",
"test/test_reads.py::TestRead::testNoQuality",
"test/test_reads.py::TestRead::testRemoveSites",
"test/test_reads.py::TestRead::testRemoveSitesAllSites",
"test/test_reads.py::TestRead::testRemoveSitesNoSites",
"test/test_reads.py::TestRead::testRemoveSitesOutOfRange",
"test/test_reads.py::TestRead::testRemoveSitesWithQuality",
"test/test_reads.py::TestRead::testToDict",
"test/test_reads.py::TestRead::testToDictNoQuality",
"test/test_reads.py::TestRead::testToFASTA",
"test/test_reads.py::TestRead::testToFASTAWithQuality",
"test/test_reads.py::TestRead::testToFASTQ",
"test/test_reads.py::TestRead::testToFASTQWithNoQuality",
"test/test_reads.py::TestRead::testToUnknownFormat",
"test/test_reads.py::TestRead::testUnequalLengths",
"test/test_reads.py::TestRead::testWalkHSPExactMatch",
"test/test_reads.py::TestRead::testWalkHSPExactMatchWithGap",
"test/test_reads.py::TestRead::testWalkHSPLeftAndRightOverhangingMatch",
"test/test_reads.py::TestRead::testWalkHSPLeftAndRightOverhangingMatchNoWhiskers",
"test/test_reads.py::TestRead::testWalkHSPLeftOverhangingMatch",
"test/test_reads.py::TestRead::testWalkHSPLeftOverhangingMatchNoWhiskers",
"test/test_reads.py::TestRead::testWalkHSPRightOverhangingMatch",
"test/test_reads.py::TestRead::testWalkHSPRightOverhangingMatchNoWhiskers",
"test/test_reads.py::TestDNARead::testGetitemReturnsNewDNARead",
"test/test_reads.py::TestDNARead::testReverseComplement",
"test/test_reads.py::TestDNARead::testReverseComplementAmbiguous",
"test/test_reads.py::TestDNARead::testReverseComplementReversesQuality",
"test/test_reads.py::TestDNARead::testTranslationOfMultipleStopCodons",
"test/test_reads.py::TestDNARead::testTranslationOfStartCodonATG",
"test/test_reads.py::TestDNARead::testTranslationOfStopCodonTAG",
"test/test_reads.py::TestDNARead::testTranslationOfStopCodonTGA",
"test/test_reads.py::TestDNARead::testTranslations",
"test/test_reads.py::TestDNARead::testTranslationsOfEmptySequence",
"test/test_reads.py::TestDNARead::testTranslationsOfOneBaseSequence",
"test/test_reads.py::TestDNARead::testTranslationsOfTwoBaseSequence",
"test/test_reads.py::TestRNARead::testGetitemReturnsNewRNARead",
"test/test_reads.py::TestRNARead::testReverseComplement",
"test/test_reads.py::TestRNARead::testReverseComplementAmbiguous",
"test/test_reads.py::TestRNARead::testTranslationOfStopCodonUAA",
"test/test_reads.py::TestAARead::testCloseCloseORF",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseORF",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseCloseORF",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseCloseORFWithJunk",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseOpenORF",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseCloseThenCloseOpenORFWithJunk",
"test/test_reads.py::TestAARead::testCloseCloseThenCloseOpenORF",
"test/test_reads.py::TestAARead::testCloseOpenORF",
"test/test_reads.py::TestAARead::testCloseOpenORFWithMultipleStarts",
"test/test_reads.py::TestAARead::testGetitemReturnsNewAARead",
"test/test_reads.py::TestAARead::testNoStartCodon_GithubIssue239",
"test/test_reads.py::TestAARead::testORFsEmptySequence",
"test/test_reads.py::TestAARead::testORFsEmptySequenceWithStart",
"test/test_reads.py::TestAARead::testORFsEmptySequenceWithStartStop",
"test/test_reads.py::TestAARead::testORFsWithJustStartsAndStops",
"test/test_reads.py::TestAARead::testORFsWithOneStopCodon",
"test/test_reads.py::TestAARead::testORFsWithTwoStopCodons",
"test/test_reads.py::TestAARead::testOpenCloseORF",
"test/test_reads.py::TestAARead::testOpenCloseORFWithMultipleStops",
"test/test_reads.py::TestAARead::testOpenCloseThenCloseCloseThenCloseOpenORF",
"test/test_reads.py::TestAARead::testOpenCloseThenCloseCloseThenCloseOpenORFWithJunk",
"test/test_reads.py::TestAARead::testOpenCloseThenCloseOpenORF",
"test/test_reads.py::TestAARead::testOpenOpenORF",
"test/test_reads.py::TestAARead::testPropertiesCorrectTranslation",
"test/test_reads.py::TestAARead::testPropertyDetailsCorrectTranslation",
"test/test_reads.py::TestAAReadWithX::testAlphabet",
"test/test_reads.py::TestAAReadWithX::testAlphabetChecking",
"test/test_reads.py::TestAAReadWithX::testGetitemReturnsNewAAReadWithX",
"test/test_reads.py::TestAAReadORF::testClosedClosedId",
"test/test_reads.py::TestAAReadORF::testClosedOpenId",
"test/test_reads.py::TestAAReadORF::testFromDict",
"test/test_reads.py::TestAAReadORF::testOpenClosedId",
"test/test_reads.py::TestAAReadORF::testOpenLeft",
"test/test_reads.py::TestAAReadORF::testOpenOpenId",
"test/test_reads.py::TestAAReadORF::testOpenRight",
"test/test_reads.py::TestAAReadORF::testSequence",
"test/test_reads.py::TestAAReadORF::testStart",
"test/test_reads.py::TestAAReadORF::testStartGreaterThanStop",
"test/test_reads.py::TestAAReadORF::testStartNegative",
"test/test_reads.py::TestAAReadORF::testStop",
"test/test_reads.py::TestAAReadORF::testStopGreaterThanOriginalSequenceLength",
"test/test_reads.py::TestAAReadORF::testToDict",
"test/test_reads.py::TestSSAARead::testCorrectAttributes",
"test/test_reads.py::TestSSAARead::testFromDict",
"test/test_reads.py::TestSSAARead::testGetitemFullCopy",
"test/test_reads.py::TestSSAARead::testGetitemId",
"test/test_reads.py::TestSSAARead::testGetitemLength",
"test/test_reads.py::TestSSAARead::testGetitemReturnsNewRead",
"test/test_reads.py::TestSSAARead::testGetitemReversed",
"test/test_reads.py::TestSSAARead::testGetitemSequence",
"test/test_reads.py::TestSSAARead::testGetitemSingleIndex",
"test/test_reads.py::TestSSAARead::testGetitemStructure",
"test/test_reads.py::TestSSAARead::testGetitemWithStep",
"test/test_reads.py::TestSSAARead::testHashDiffersIfIdDiffers",
"test/test_reads.py::TestSSAARead::testHashDiffersIfSequenceDiffers",
"test/test_reads.py::TestSSAARead::testHashDiffersIfStructureDiffers",
"test/test_reads.py::TestSSAARead::testHashViaDict",
"test/test_reads.py::TestSSAARead::testHashViaSet",
"test/test_reads.py::TestSSAARead::testKeepSites",
"test/test_reads.py::TestSSAARead::testKeepSitesAllSites",
"test/test_reads.py::TestSSAARead::testKeepSitesNoSites",
"test/test_reads.py::TestSSAARead::testKeepSitesOutOfRange",
"test/test_reads.py::TestSSAARead::testReads",
"test/test_reads.py::TestSSAARead::testRemoveSites",
"test/test_reads.py::TestSSAARead::testRemoveSitesAllSites",
"test/test_reads.py::TestSSAARead::testRemoveSitesNoSites",
"test/test_reads.py::TestSSAARead::testRemoveSitesOutOfRange",
"test/test_reads.py::TestSSAARead::testSequenceLengthMatchesStructureLength",
"test/test_reads.py::TestSSAARead::testToDict",
"test/test_reads.py::TestSSAARead::testToString",
"test/test_reads.py::TestSSAARead::testToStringWithExplicitFastaFormat",
"test/test_reads.py::TestSSAARead::testToStringWithExplicitFastaSSFormat",
"test/test_reads.py::TestSSAARead::testToStringWithStructureSuffix",
"test/test_reads.py::TestSSAARead::testToStringWithUnknownFormat",
"test/test_reads.py::TestSSAAReadWithX::testCorrectAttributes",
"test/test_reads.py::TestSSAAReadWithX::testFromDict",
"test/test_reads.py::TestSSAAReadWithX::testGetitemFullCopy",
"test/test_reads.py::TestSSAAReadWithX::testGetitemId",
"test/test_reads.py::TestSSAAReadWithX::testGetitemLength",
"test/test_reads.py::TestSSAAReadWithX::testGetitemReturnsNewRead",
"test/test_reads.py::TestSSAAReadWithX::testGetitemReversed",
"test/test_reads.py::TestSSAAReadWithX::testGetitemSequence",
"test/test_reads.py::TestSSAAReadWithX::testGetitemSingleIndex",
"test/test_reads.py::TestSSAAReadWithX::testGetitemStructure",
"test/test_reads.py::TestSSAAReadWithX::testGetitemWithStep",
"test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfIdDiffers",
"test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfSequenceDiffers",
"test/test_reads.py::TestSSAAReadWithX::testHashDiffersIfStructureDiffers",
"test/test_reads.py::TestSSAAReadWithX::testHashViaDict",
"test/test_reads.py::TestSSAAReadWithX::testHashViaSet",
"test/test_reads.py::TestSSAAReadWithX::testKeepSites",
"test/test_reads.py::TestSSAAReadWithX::testKeepSitesAllSites",
"test/test_reads.py::TestSSAAReadWithX::testKeepSitesNoSites",
"test/test_reads.py::TestSSAAReadWithX::testKeepSitesOutOfRange",
"test/test_reads.py::TestSSAAReadWithX::testReads",
"test/test_reads.py::TestSSAAReadWithX::testRemoveSites",
"test/test_reads.py::TestSSAAReadWithX::testRemoveSitesAllSites",
"test/test_reads.py::TestSSAAReadWithX::testRemoveSitesNoSites",
"test/test_reads.py::TestSSAAReadWithX::testRemoveSitesOutOfRange",
"test/test_reads.py::TestSSAAReadWithX::testSequenceContainingX",
"test/test_reads.py::TestSSAAReadWithX::testSequenceLengthMatchesStructureLength",
"test/test_reads.py::TestSSAAReadWithX::testToDict",
"test/test_reads.py::TestSSAAReadWithX::testToString",
"test/test_reads.py::TestSSAAReadWithX::testToStringWithExplicitFastaFormat",
"test/test_reads.py::TestSSAAReadWithX::testToStringWithExplicitFastaSSFormat",
"test/test_reads.py::TestSSAAReadWithX::testToStringWithStructureSuffix",
"test/test_reads.py::TestSSAAReadWithX::testToStringWithUnknownFormat",
"test/test_reads.py::TestTranslatedRead::testExpectedAttributes",
"test/test_reads.py::TestTranslatedRead::testExpectedFrame",
"test/test_reads.py::TestTranslatedRead::testFromDict",
"test/test_reads.py::TestTranslatedRead::testId",
"test/test_reads.py::TestTranslatedRead::testIdReverseComplemented",
"test/test_reads.py::TestTranslatedRead::testMaximumORFLength",
"test/test_reads.py::TestTranslatedRead::testMaximumORFLengthNoStops",
"test/test_reads.py::TestTranslatedRead::testOutOfRangeFrame",
"test/test_reads.py::TestTranslatedRead::testReverseComplemented",
"test/test_reads.py::TestTranslatedRead::testSequence",
"test/test_reads.py::TestTranslatedRead::testToDict",
"test/test_reads.py::TestReadClassNameToClass::testNames",
"test/test_reads.py::TestReads::testEmptyInitialReads",
"test/test_reads.py::TestReads::testInitialReads",
"test/test_reads.py::TestReads::testManuallyAddedReads",
"test/test_reads.py::TestReads::testManuallyAddedReadsLength",
"test/test_reads.py::TestReads::testNoReads",
"test/test_reads.py::TestReads::testNoReadsLength",
"test/test_reads.py::TestReads::testRepeatedIter",
"test/test_reads.py::TestReads::testSaveAsFASTA",
"test/test_reads.py::TestReads::testSaveAsFASTQ",
"test/test_reads.py::TestReads::testSaveAsFASTQFailsOnReadWithNoQuality",
"test/test_reads.py::TestReads::testSaveFASTAIsDefault",
"test/test_reads.py::TestReads::testSaveReturnsReadCount",
"test/test_reads.py::TestReads::testSaveToFileDescriptor",
"test/test_reads.py::TestReads::testSaveWithUnknownFormat",
"test/test_reads.py::TestReads::testSaveWithUppercaseFormat",
"test/test_reads.py::TestReads::testSubclass",
"test/test_reads.py::TestReads::testSubclassLength",
"test/test_reads.py::TestReads::testSubclassWithAdditionalReads",
"test/test_reads.py::TestReads::testUnfilteredLengthAdditionalReads",
"test/test_reads.py::TestReads::testUnfilteredLengthAdditionalReadsAfterFiltering",
"test/test_reads.py::TestReads::testUnfilteredLengthBeforeIterating",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialReads",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsAfterFiltering",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsIsReads",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialReadsIsReadsWithAdditional",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassThenFiltered",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithAdditionalThenFiltered",
"test/test_reads.py::TestReads::testUnfilteredLengthInitialSubclassWithNoLen",
"test/test_reads.py::TestReads::testUnfilteredLengthNoReads",
"test/test_reads.py::TestReadsFiltering::testAddFiltersThenClearFilters",
"test/test_reads.py::TestReadsFiltering::testFilterBlacklist",
"test/test_reads.py::TestReadsFiltering::testFilterDoNotRemoveDescriptions",
"test/test_reads.py::TestReadsFiltering::testFilterDuplicates",
"test/test_reads.py::TestReadsFiltering::testFilterDuplicatesById",
"test/test_reads.py::TestReadsFiltering::testFilterHead",
"test/test_reads.py::TestReadsFiltering::testFilterHeadZero",
"test/test_reads.py::TestReadsFiltering::testFilterKeepSequences",
"test/test_reads.py::TestReadsFiltering::testFilterKeepSequencesNoSequences",
"test/test_reads.py::TestReadsFiltering::testFilterNegativeRegex",
"test/test_reads.py::TestReadsFiltering::testFilterNoArgs",
"test/test_reads.py::TestReadsFiltering::testFilterOnLengthEverythingMatches",
"test/test_reads.py::TestReadsFiltering::testFilterOnLengthNothingMatches",
"test/test_reads.py::TestReadsFiltering::testFilterOnMaxLength",
"test/test_reads.py::TestReadsFiltering::testFilterOnMinLength",
"test/test_reads.py::TestReadsFiltering::testFilterPositiveRegex",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromFiveReads",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfFiveFromOneRead",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfOneFromOneRead",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfTwoFromFiveReads",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetOfZeroReads",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroNoReads",
"test/test_reads.py::TestReadsFiltering::testFilterRandomSubsetSizeZeroTwoReads",
"test/test_reads.py::TestReadsFiltering::testFilterRemoveDescriptions",
"test/test_reads.py::TestReadsFiltering::testFilterRemoveGaps",
"test/test_reads.py::TestReadsFiltering::testFilterRemoveGapsWithQuality",
"test/test_reads.py::TestReadsFiltering::testFilterRemoveSequences",
"test/test_reads.py::TestReadsFiltering::testFilterRemoveSequencesNoSequences",
"test/test_reads.py::TestReadsFiltering::testFilterReturnsReadInstance",
"test/test_reads.py::TestReadsFiltering::testFilterTruncateTitles",
"test/test_reads.py::TestReadsFiltering::testFilterWhitelist",
"test/test_reads.py::TestReadsFiltering::testFilterWithMinLengthEqualToMaxLength",
"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatChangesIds",
"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatOmits",
"test/test_reads.py::TestReadsFiltering::testFilterWithModifierThatOmitsAndChangesIds",
"test/test_reads.py::TestReadsFiltering::testFilteredReadsInstanceHasExpectedLength",
"test/test_reads.py::TestReadsFiltering::testKeepSites",
"test/test_reads.py::TestReadsFiltering::testKeepSitesAllSites",
"test/test_reads.py::TestReadsFiltering::testKeepSitesNoSites",
"test/test_reads.py::TestReadsFiltering::testKeepSitesOutOfRange",
"test/test_reads.py::TestReadsFiltering::testKeepSitesWithQuality",
"test/test_reads.py::TestReadsFiltering::testLineNumberFile",
"test/test_reads.py::TestReadsFiltering::testLineNumberFileEmpty",
"test/test_reads.py::TestReadsFiltering::testLineNumberFileFirstLineTooSmall",
"test/test_reads.py::TestReadsFiltering::testLineNumberFileNonAscending",
"test/test_reads.py::TestReadsFiltering::testLineNumberFileRunOutOfSequences",
"test/test_reads.py::TestReadsFiltering::testRemoveAndKeepSites",
"test/test_reads.py::TestReadsFiltering::testRemoveSites",
"test/test_reads.py::TestReadsFiltering::testRemoveSitesAllSites",
"test/test_reads.py::TestReadsFiltering::testRemoveSitesNoSites",
"test/test_reads.py::TestReadsFiltering::testRemoveSitesOutOfRange",
"test/test_reads.py::TestReadsFiltering::testRemoveSitesWithQuality",
"test/test_reads.py::TestReadsFiltering::testSampleFractionAndNoTrueLengthRaisesValueError",
"test/test_reads.py::TestReadsFiltering::testSampleFractionAndRandomSubsetRaisesValueError",
"test/test_reads.py::TestReadsFiltering::testSampleFractionOne",
"test/test_reads.py::TestReadsFiltering::testSampleFractionPointOne",
"test/test_reads.py::TestReadsFiltering::testSampleFractionZero",
"test/test_reads.py::TestReadsInRAM::testAdd",
"test/test_reads.py::TestReadsInRAM::testFromReads",
"test/test_reads.py::TestReadsInRAM::testNoReads",
"test/test_reads.py::TestReadsInRAM::testOneReadIndex",
"test/test_reads.py::TestReadsInRAM::testOneReadLength",
"test/test_reads.py::TestReadsInRAM::testOneReadList",
"test/test_reads.py::TestReadsInRAM::testSetItem",
"test/test_reads.py::TestReadsInRAM::testTwoReadsIndex",
"test/test_reads.py::TestReadsInRAM::testTwoReadsLength",
"test/test_reads.py::TestReadsInRAM::testTwoReadsList",
"test/test_reads.py::TestSummarizePosition::testCorrectFrequencies",
"test/test_reads.py::TestSummarizePosition::testExcludeShortSequences",
"test/test_reads.py::TestSummarizePosition::testFrequenciesNoReads",
"test/test_reads.py::TestSummarizePosition::testIndexLargerThanSequenceLength",
"test/test_reads.py::TestSummarizePosition::testNumberOfExclusionsNoReads",
"test/test_reads.py::TestSitesMatching::testAllMatches",
"test/test_reads.py::TestSitesMatching::testIgnoreCase",
"test/test_reads.py::TestSitesMatching::testMatchCase",
"test/test_reads.py::TestSitesMatching::testMultipleReadsAll",
"test/test_reads.py::TestSitesMatching::testMultipleReadsAllWithDifferingLengths",
"test/test_reads.py::TestSitesMatching::testMultipleReadsAny",
"test/test_reads.py::TestSitesMatching::testMultipleReadsAnyWithDifferingLengths",
"test/test_reads.py::TestSitesMatching::testNoMatches",
"test/test_reads.py::TestSitesMatching::testPartialMatch"
] | [] | MIT License | 2,592 | 1,404 | [
"dark/__init__.py",
"dark/filter.py",
"dark/reads.py"
] |
|
conan-io__conan-2952 | c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759 | 2018-05-29 10:29:36 | c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759 | diff --git a/conans/client/build/autotools_environment.py b/conans/client/build/autotools_environment.py
index 924161e9c..9bf4bd3e8 100644
--- a/conans/client/build/autotools_environment.py
+++ b/conans/client/build/autotools_environment.py
@@ -14,6 +14,7 @@ from conans.client.tools.win import unix_path
from conans.tools import (environment_append, args_to_string, cpu_count, cross_building,
detected_architecture, get_gnu_triplet)
from conans.errors import ConanException
+from conans.util.files import get_abs_path
class AutoToolsBuildEnvironment(object):
@@ -131,7 +132,9 @@ class AutoToolsBuildEnvironment(object):
triplet_args.append("--target=%s" % (target or self.target))
if pkg_config_paths:
- pkg_env = {"PKG_CONFIG_PATH": os.pathsep.join(pkg_config_paths)}
+ pkg_env = {"PKG_CONFIG_PATH":
+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
+ for f in pkg_config_paths)}
else:
# If we are using pkg_config generator automate the pcs location, otherwise it could
# read wrong files
diff --git a/conans/client/build/cmake.py b/conans/client/build/cmake.py
index 9964d0836..b5f8cb843 100644
--- a/conans/client/build/cmake.py
+++ b/conans/client/build/cmake.py
@@ -12,7 +12,7 @@ from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.util.env_reader import get_env
-from conans.util.files import mkdir
+from conans.util.files import mkdir, get_abs_path
from conans.tools import cpu_count, args_to_string
from conans import tools
from conans.util.log import logger
@@ -28,7 +28,8 @@ def _get_env_cmake_system_name():
class CMake(object):
def __init__(self, conanfile, generator=None, cmake_system_name=True,
- parallel=True, build_type=None, toolset=None, make_program=None, set_cmake_flags=False):
+ parallel=True, build_type=None, toolset=None, make_program=None,
+ set_cmake_flags=False):
"""
:param settings_or_conanfile: Conanfile instance (or settings for retro compatibility)
:param generator: Generator name to use or none to autodetect
@@ -370,7 +371,8 @@ class CMake(object):
self._conanfile.run(command)
def configure(self, args=None, defs=None, source_dir=None, build_dir=None,
- source_folder=None, build_folder=None, cache_build_folder=None):
+ source_folder=None, build_folder=None, cache_build_folder=None,
+ pkg_config_paths=None):
# TODO: Deprecate source_dir and build_dir in favor of xxx_folder
if not self._conanfile.should_configure:
@@ -387,12 +389,26 @@ class CMake(object):
defs_to_string(defs),
args_to_string([source_dir])
])
- command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
- if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
- with tools.remove_from_path("sh"):
- self._run(command)
+
+
+ if pkg_config_paths:
+ pkg_env = {"PKG_CONFIG_PATH":
+ os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
+ for f in pkg_config_paths)}
else:
- self._run(command)
+ # If we are using pkg_config generator automate the pcs location, otherwise it could
+ # read wrong files
+ set_env = "pkg_config" in self._conanfile.generators \
+ and "PKG_CONFIG_PATH" not in os.environ
+ pkg_env = {"PKG_CONFIG_PATH": self._conanfile.install_folder} if set_env else {}
+
+ with tools.environment_append(pkg_env):
+ command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
+ if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
+ with tools.remove_from_path("sh"):
+ self._conanfile.run(command)
+ else:
+ self._conanfile.run(command)
def build(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_build:
diff --git a/conans/client/build/meson.py b/conans/client/build/meson.py
index 1545a59d7..b8a7ff4b3 100644
--- a/conans/client/build/meson.py
+++ b/conans/client/build/meson.py
@@ -4,7 +4,7 @@ from conans import tools
from conans.client import join_arguments, defs_to_string
from conans.errors import ConanException
from conans.tools import args_to_string
-from conans.util.files import mkdir
+from conans.util.files import mkdir, get_abs_path
class Meson(object):
@@ -53,14 +53,6 @@ class Meson(object):
def build_folder(self, value):
self.build_dir = value
- @staticmethod
- def _get_dir(folder, origin):
- if folder:
- if os.path.isabs(folder):
- return folder
- return os.path.join(origin, folder)
- return origin
-
def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):
if (source_folder or build_folder) and (source_dir or build_dir):
raise ConanException("Use 'build_folder'/'source_folder'")
@@ -69,11 +61,11 @@ class Meson(object):
build_ret = build_dir or self.build_dir or self._conanfile.build_folder
source_ret = source_dir or self._conanfile.source_folder
else:
- build_ret = self._get_dir(build_folder, self._conanfile.build_folder)
- source_ret = self._get_dir(source_folder, self._conanfile.source_folder)
+ build_ret = get_abs_path(build_folder, self._conanfile.build_folder)
+ source_ret = get_abs_path(source_folder, self._conanfile.source_folder)
if self._conanfile.in_local_cache and cache_build_folder:
- build_ret = self._get_dir(cache_build_folder, self._conanfile.build_folder)
+ build_ret = get_abs_path(cache_build_folder, self._conanfile.build_folder)
return source_ret, build_ret
@@ -90,7 +82,7 @@ class Meson(object):
cache_build_folder)
if pkg_config_paths:
- pc_paths = os.pathsep.join(self._get_dir(f, self._conanfile.install_folder)
+ pc_paths = os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
for f in pkg_config_paths)
else:
pc_paths = self._conanfile.install_folder
diff --git a/conans/util/files.py b/conans/util/files.py
index d8492cd72..8c6a859a1 100644
--- a/conans/util/files.py
+++ b/conans/util/files.py
@@ -181,6 +181,14 @@ def relative_dirs(path):
return ret
+def get_abs_path(folder, origin):
+ if folder:
+ if os.path.isabs(folder):
+ return folder
+ return os.path.join(origin, folder)
+ return origin
+
+
def _change_permissions(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
| CMake build wrapper should set PKG_CONFIG_PATH
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant.
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
conan version 1.0.4 or master
A lot of cmake scripts use both `find_package` (`FindFoo.cmake`-based) and `pkg_check_modules` (`pkg-config`-based). CMake build wrapper should automatically provide `PKG_CONFIG_PATH` env var set to build directory or to recipe-provided paths. Exact same behavior is seen in `AutoToolsBuildEnviroment` or `Meson`. `CMake` should not be an exception. | conan-io/conan | diff --git a/conans/test/build_helpers/autotools_configure_test.py b/conans/test/build_helpers/autotools_configure_test.py
index fed69a0a3..9f0fcd983 100644
--- a/conans/test/build_helpers/autotools_configure_test.py
+++ b/conans/test/build_helpers/autotools_configure_test.py
@@ -1,16 +1,18 @@
+import os
import platform
import unittest
+from collections import namedtuple
-from conans.client.build.autotools_environment import AutoToolsBuildEnvironment
from conans import tools
+from conans.client.build.autotools_environment import AutoToolsBuildEnvironment
from conans.client.tools.oss import cpu_count
+from conans.model.ref import ConanFileReference
+from conans.model.settings import Settings
from conans.paths import CONANFILE
-from conans.test.utils.conanfile import MockConanfile, MockSettings, MockOptions
+from conans.test.build_helpers.cmake_test import ConanFileMock
from conans.test.util.tools_test import RunnerMock
+from conans.test.utils.conanfile import MockConanfile, MockSettings, MockOptions
from conans.test.utils.tools import TestClient
-from conans.test.build_helpers.cmake_test import ConanFileMock
-from conans.model.settings import Settings
-from collections import namedtuple
class AutoToolsConfigureTest(unittest.TestCase):
@@ -416,9 +418,12 @@ class HelloConan(ConanFile):
self.assertIn("PKG_CONFIG_PATH=%s" % client.client_cache.conan_folder, client.out)
client.save({CONANFILE: conanfile % ("'pkg_config'",
- "pkg_config_paths=['/tmp/hello', '/tmp/foo']")})
+ "pkg_config_paths=['/tmp/hello', 'foo']")})
client.run("create . conan/testing")
- self.assertIn("PKG_CONFIG_PATH=/tmp/hello:/tmp/foo", client.out)
+ ref = ConanFileReference.loads("Hello/1.2.1@conan/testing")
+ builds_folder = client.client_cache.builds(ref)
+ bf = os.path.join(builds_folder, os.listdir(builds_folder)[0])
+ self.assertIn("PKG_CONFIG_PATH=/tmp/hello:%s/foo" % bf, client.out)
def cross_build_command_test(self):
runner = RunnerMock()
diff --git a/conans/test/build_helpers/cmake_test.py b/conans/test/build_helpers/cmake_test.py
index 812c53444..09aab1631 100644
--- a/conans/test/build_helpers/cmake_test.py
+++ b/conans/test/build_helpers/cmake_test.py
@@ -688,6 +688,38 @@ build_type: [ Release]
cmake.configure()
self.assertNotIn(self.tempdir, conanfile.path)
+ def test_pkg_config_path(self):
+ conanfile = ConanFileMock()
+ conanfile.generators = ["pkg_config"]
+ conanfile.install_folder = "/my_install/folder/"
+ settings = Settings.loads(default_settings_yml)
+ settings.os = "Windows"
+ settings.compiler = "Visual Studio"
+ settings.compiler.version = "12"
+ settings.arch = "x86"
+ conanfile.settings = settings
+ cmake = CMake(conanfile)
+ cmake.configure()
+ self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"], "/my_install/folder/")
+
+ conanfile.generators = []
+ cmake = CMake(conanfile)
+ cmake.configure()
+ self.assertNotIn("PKG_CONFIG_PATH", conanfile.captured_env)
+
+ cmake = CMake(conanfile)
+ cmake.configure(pkg_config_paths=["reldir1", "/abspath2/to/other"])
+ self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"],
+ os.path.pathsep.join(["/my_install/folder/reldir1",
+ "/abspath2/to/other"]))
+
+ # If there is already a PKG_CONFIG_PATH do not set it
+ conanfile.generators = ["pkg_config"]
+ cmake = CMake(conanfile)
+ with tools.environment_append({"PKG_CONFIG_PATH": "do_not_mess_with_this"}):
+ cmake.configure()
+ self.assertEquals(conanfile.captured_env["PKG_CONFIG_PATH"], "do_not_mess_with_this")
+
def test_shared(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
@@ -843,7 +875,10 @@ class ConanFileMock(ConanFile):
self.should_configure = True
self.should_build = True
self.should_install = True
+ self.generators = []
+ self.captured_env = {}
def run(self, command):
self.command = command
self.path = os.environ["PATH"]
+ self.captured_env = {key: value for key, value in os.environ.items()}
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 4
} | 1.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"nose",
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"conans/requirements.txt",
"conans/requirements_osx.txt",
"conans/requirements_server.txt",
"conans/requirements_dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asn1crypto==1.5.1
astroid==1.6.6
attrs==22.2.0
beautifulsoup4==4.12.3
bottle==0.12.25
certifi==2021.5.30
cffi==1.15.1
charset-normalizer==2.0.12
codecov==2.1.13
colorama==0.3.9
-e git+https://github.com/conan-io/conan.git@c3a6ed5dc7b5e27ac69191e36aa7592e47ce7759#egg=conan
coverage==4.2
cryptography==2.1.4
deprecation==2.0.7
distro==1.1.0
fasteners==0.19
future==0.16.0
idna==3.10
importlib-metadata==4.8.3
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==1.3.0
ndg-httpsclient==0.4.4
node-semver==0.2.0
nose==1.3.7
packaging==21.3
parameterized==0.8.1
patch==1.16
pbr==6.1.1
pluggy==1.0.0
pluginbase==0.7
py==1.11.0
pyasn==1.5.0b7
pyasn1==0.5.1
pycparser==2.21
Pygments==2.14.0
PyJWT==1.7.1
pylint==1.8.4
pyOpenSSL==17.5.0
pyparsing==3.1.4
pytest==7.0.1
PyYAML==3.12
requests==2.27.1
six==1.17.0
soupsieve==2.3.2.post1
tomli==1.2.3
typing_extensions==4.1.1
urllib3==1.26.20
waitress==2.0.0
WebOb==1.8.9
WebTest==2.0.35
wrapt==1.16.0
zipp==3.6.0
| name: conan
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asn1crypto==1.5.1
- astroid==1.6.6
- attrs==22.2.0
- beautifulsoup4==4.12.3
- bottle==0.12.25
- cffi==1.15.1
- charset-normalizer==2.0.12
- codecov==2.1.13
- colorama==0.3.9
- coverage==4.2
- cryptography==2.1.4
- deprecation==2.0.7
- distro==1.1.0
- fasteners==0.19
- future==0.16.0
- idna==3.10
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==1.3.0
- ndg-httpsclient==0.4.4
- node-semver==0.2.0
- nose==1.3.7
- packaging==21.3
- parameterized==0.8.1
- patch==1.16
- pbr==6.1.1
- pluggy==1.0.0
- pluginbase==0.7
- py==1.11.0
- pyasn==1.5.0b7
- pyasn1==0.5.1
- pycparser==2.21
- pygments==2.14.0
- pyjwt==1.7.1
- pylint==1.8.4
- pyopenssl==17.5.0
- pyparsing==3.1.4
- pytest==7.0.1
- pyyaml==3.12
- requests==2.27.1
- six==1.17.0
- soupsieve==2.3.2.post1
- tomli==1.2.3
- typing-extensions==4.1.1
- urllib3==1.26.20
- waitress==2.0.0
- webob==1.8.9
- webtest==2.0.35
- wrapt==1.16.0
- zipp==3.6.0
prefix: /opt/conda/envs/conan
| [
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_pkg_config_path"
] | [
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_pkg_config_paths"
] | [
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_cppstd",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_make_targets_install",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_mocked_methods",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_previous_env",
"conans/test/build_helpers/autotools_configure_test.py::AutoToolsConfigureTest::test_variables",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_clean_sh_path",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cmake_system_version_android",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_cores_ancient_visual",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_deprecated_behaviour",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_missing_settings",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_run_tests",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_shared",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_sysroot",
"conans/test/build_helpers/cmake_test.py::CMakeTest::test_verbose"
] | [] | MIT License | 2,594 | 1,786 | [
"conans/client/build/autotools_environment.py",
"conans/client/build/cmake.py",
"conans/client/build/meson.py",
"conans/util/files.py"
] |
|
ingresso-group__pyticketswitch-70 | a22c4a3679174b1798acda89e59559930eb1f1a3 | 2018-05-29 18:32:26 | a22c4a3679174b1798acda89e59559930eb1f1a3 | codecov[bot]: # [Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=h1) Report
> Merging [#70](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=desc) into [master](https://codecov.io/gh/ingresso-group/pyticketswitch/commit/a22c4a3679174b1798acda89e59559930eb1f1a3?src=pr&el=desc) will **increase** coverage by `0.02%`.
> The diff coverage is `100%`.
[](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=tree)
```diff
@@ Coverage Diff @@
## master #70 +/- ##
==========================================
+ Coverage 99.48% 99.51% +0.02%
==========================================
Files 37 37
Lines 1764 2046 +282
==========================================
+ Hits 1755 2036 +281
- Misses 9 10 +1
```
| [Impacted Files](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=tree) | Coverage Δ | |
|---|---|---|
| [pyticketswitch/reservation.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvcmVzZXJ2YXRpb24ucHk=) | `100% <100%> (ø)` | :arrow_up: |
| [pyticketswitch/exceptions.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvZXhjZXB0aW9ucy5weQ==) | `100% <100%> (ø)` | :arrow_up: |
| [pyticketswitch/trolley.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvdHJvbGxleS5weQ==) | `94.73% <100%> (+0.09%)` | :arrow_up: |
| [pyticketswitch/client.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvY2xpZW50LnB5) | `99.31% <100%> (+0.13%)` | :arrow_up: |
| [pyticketswitch/availability.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvYXZhaWxhYmlsaXR5LnB5) | `100% <0%> (ø)` | :arrow_up: |
| [pyticketswitch/mixins.py](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70/diff?src=pr&el=tree#diff-cHl0aWNrZXRzd2l0Y2gvbWl4aW5zLnB5) | `100% <0%> (ø)` | :arrow_up: |
------
[Continue to review full report at Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=continue).
> **Legend** - [Click here to learn more](https://docs.codecov.io/docs/codecov-delta)
> `Δ = absolute <relative> (impact)`, `ø = not affected`, `? = missing data`
> Powered by [Codecov](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=footer). Last update [a22c4a3...3a2e91d](https://codecov.io/gh/ingresso-group/pyticketswitch/pull/70?src=pr&el=lastupdated). Read the [comment docs](https://docs.codecov.io/docs/pull-request-comments).
| diff --git a/pyticketswitch/client.py b/pyticketswitch/client.py
index 8b00be5..7726b43 100644
--- a/pyticketswitch/client.py
+++ b/pyticketswitch/client.py
@@ -1081,7 +1081,8 @@ class Client(object):
def get_trolley(self, token=None, number_of_seats=None, discounts=None,
seats=None, send_codes=None, ticket_type_code=None,
performance_id=None, price_band_code=None,
- item_numbers_to_remove=None, **kwargs):
+ item_numbers_to_remove=None,
+ raise_on_unavailable_order=False, **kwargs):
"""Retrieve the contents of a trolley from the API.
@@ -1097,14 +1098,17 @@ class Client(object):
seats (list): list of seat IDs.
send_codes (dict): send codes indexed on backend source
code.
- ticket_type_code: (string): code of ticket type to add to
+ ticket_type_code (string): code of ticket type to add to
the trolley.
- performance_id: (string): id of the performance to add to
+ performance_id (string): id of the performance to add to
the trolley.
- price_band_code: (string): code of price band to add to
+ price_band_code (string): code of price band to add to
the trolley.
- item_numbers_to_remove: (list): list of item numbers to
+ item_numbers_to_remove (list): list of item numbers to
remove from trolley.
+ raise_on_unavailable_order (bool): When set to ``True`` this method
+ will raise an exception when the API was not able to add an
+ order to the trolley as it was unavailable.
**kwargs: arbitary additional raw keyword arguments to add the
parameters.
@@ -1116,6 +1120,9 @@ class Client(object):
Raises:
InvalidParametersError: when there is an issue with the provided
parameters.
+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set
+ to ``True`` and the requested addition to a trolley was
+ unavailable.
.. _`/f13/trolley.v1`: http://docs.ingresso.co.uk/#trolley
@@ -1133,6 +1140,11 @@ class Client(object):
trolley = Trolley.from_api_data(response)
meta = CurrencyMeta.from_api_data(response)
+ if raise_on_unavailable_order:
+ if trolley and trolley.input_contained_unavailable_order:
+ raise exceptions.OrderUnavailableError(
+ "inputs contained unavailable order")
+
return trolley, meta
def get_upsells(self, token=None, number_of_seats=None, discounts=None,
@@ -1278,7 +1290,8 @@ class Client(object):
def make_reservation(self, token=None, number_of_seats=None, discounts=None,
seats=None, send_codes=None, ticket_type_code=None,
performance_id=None, price_band_code=None,
- item_numbers_to_remove=None, **kwargs):
+ item_numbers_to_remove=None,
+ raise_on_unavailable_order=False, **kwargs):
"""Attempt to reserve all the items in the given trolley
@@ -1314,6 +1327,9 @@ class Client(object):
the trolley
item_numbers_to_remove: (list): list of item numbers to
remove from trolley.
+ raise_on_unavailable_order (bool): When set to ``True`` this method
+ will raise an exception when the API was not able to add an
+ order to the trolley as it was unavailable.
**kwargs: arbitary additional raw keyword arguments to add the
parameters.
@@ -1325,6 +1341,9 @@ class Client(object):
Raises:
InvalidParametersError: when there is an issue with the provided
parameters.
+ OrderUnavailableError: when ``raise_on_unavailable_order`` is set
+ to ``True`` and the requested addition to a trolley was
+ unavailable.
.. _`/f13/reserve.v1`: http://docs.ingresso.co.uk/#reserve
@@ -1342,15 +1361,22 @@ class Client(object):
reservation = Reservation.from_api_data(response)
meta = CurrencyMeta.from_api_data(response)
+ if raise_on_unavailable_order:
+ if reservation and reservation.input_contained_unavailable_order:
+ raise exceptions.OrderUnavailableError(
+ "inputs contained unavailable order")
+
return reservation, meta
- def release_reservation(self, transaction_uuid):
+ def release_reservation(self, transaction_uuid, **kwargs):
"""Release an existing reservation.
Wraps `/f13/release.v1`_
Args:
transaction_uuid (str): the identifier of the reservaiton.
+ **kwargs: arbitary additional raw keyword arguments to add the
+ parameters.
Returns:
bool: :obj:`True` if the reservation was successfully released
@@ -1361,7 +1387,8 @@ class Client(object):
"""
params = {'transaction_uuid': transaction_uuid}
- response = self.make_request('release.v1', params, method=POST)
+ kwargs.update(params)
+ response = self.make_request('release.v1', kwargs, method=POST)
return response.get('released_ok', False)
diff --git a/pyticketswitch/exceptions.py b/pyticketswitch/exceptions.py
index f88f636..3aef367 100644
--- a/pyticketswitch/exceptions.py
+++ b/pyticketswitch/exceptions.py
@@ -51,3 +51,7 @@ class BackendThrottleError(BackendError):
class CallbackGoneError(APIError):
pass
+
+
+class OrderUnavailableError(PyticketswitchError):
+ pass
diff --git a/pyticketswitch/reservation.py b/pyticketswitch/reservation.py
index b12d2bb..a75087e 100644
--- a/pyticketswitch/reservation.py
+++ b/pyticketswitch/reservation.py
@@ -46,9 +46,12 @@ class Reservation(Status):
"""
- def __init__(self, unreserved_orders=None, *args, **kwargs):
+ def __init__(self, unreserved_orders=None,
+ input_contained_unavailable_order=False, *args, **kwargs):
+
super(Reservation, self).__init__(*args, **kwargs)
self.unreserved_orders = unreserved_orders
+ self.input_contained_unavailable_order = input_contained_unavailable_order
@classmethod
def from_api_data(cls, data):
@@ -75,7 +78,9 @@ class Reservation(Status):
for order in raw_unreserved_orders
]
- inst.unreserved_orders=unreserved_orders
+ inst.unreserved_orders = unreserved_orders
+ inst.input_contained_unavailable_order = data.get(
+ 'input_contained_unavailable_order', False)
return inst
diff --git a/pyticketswitch/trolley.py b/pyticketswitch/trolley.py
index 0a78e2e..df54c75 100644
--- a/pyticketswitch/trolley.py
+++ b/pyticketswitch/trolley.py
@@ -25,11 +25,14 @@ class Trolley(JSONMixin, object):
order_count (int): the number of orders in the trolley.
purchase_result (:class:`PurchaseResult <pyticketswitch.callout.Callout>`):
the result of the purchase attempt when available.
-
+ input_contained_unavailable_order (bool): indicates that the call used
+ to create or modify this trolley object included at least one order
+ that was not available.
"""
def __init__(self, token=None, transaction_uuid=None, transaction_id=None,
bundles=None, discarded_orders=None, minutes_left=None,
- order_count=None, purchase_result=None):
+ order_count=None, purchase_result=None,
+ input_contained_unavailable_order=False):
self.token = token
self.transaction_uuid = transaction_uuid
self.transaction_id = transaction_id
@@ -38,6 +41,7 @@ class Trolley(JSONMixin, object):
self.minutes_left = minutes_left
self.order_count = order_count
self.purchase_result = purchase_result
+ self.input_contained_unavailable_order = input_contained_unavailable_order
@classmethod
def from_api_data(cls, data):
@@ -82,6 +86,8 @@ class Trolley(JSONMixin, object):
'transaction_uuid': raw_contents.get('transaction_uuid'),
'transaction_id': raw_contents.get('transaction_id'),
'order_count': data.get('trolley_order_count'),
+ 'input_contained_unavailable_order': data.get(
+ 'input_contained_unavailable_order', False),
}
minutes = data.get('minutes_left_on_reserve')
| missing "input_contained_unavailable_order" flag in trolley/reservation response
Currently when you attempt to add something to a trolley that is not available (sold out, performance in the past, max tickets per order exceeded, etc) it just appears as an empty trolley without any indication that something has gone wrong.
The API returns the `input_contained_unavailable_order` flag in it's response from `trolley.v1` and `reserve.v1`, and this should be added to the trolley object. I would suggest we should look at raising an exception as well. | ingresso-group/pyticketswitch | diff --git a/tests/test_client.py b/tests/test_client.py
index 6dfc234..c339059 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -1320,6 +1320,34 @@ class TestClient:
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
+ def test_get_trolley_with_unavailable_order(self, client, monkeypatch):
+ """
+ This test is to check that an unavailable order doesn't raise
+ any exceptions unless `raise_on_unavailable_order` is set to true
+ """
+ response = {
+ 'trolley_contents': {},
+ 'trolley_token': 'DEF456',
+ 'currency_code': 'gbp',
+ 'input_contained_unavailable_order': True,
+ 'currency_details': {
+ 'gbp': {
+ 'currency_code': 'gbp',
+ }
+ }
+ }
+
+ mock_make_request = Mock(return_value=response)
+ monkeypatch.setattr(client, 'make_request', mock_make_request)
+
+ # this should not raise any exceptions
+ client.get_trolley()
+
+ # but this should
+ with pytest.raises(exceptions.OrderUnavailableError):
+ client.get_trolley(raise_on_unavailable_order=True)
+
+
def test_get_upsells(self, client, monkeypatch):
# fakes
response = {
@@ -1409,6 +1437,26 @@ class TestClient:
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
+ def test_make_reservation_with_unavailable_order(self, client, monkeypatch):
+ """
+ This test is to check that an unavailable order doesn't raise
+ any exceptions unless `raise_on_unavailable_order` is set to true
+ """
+ data = {
+ "input_contained_unavailable_order": True,
+ "unreserved_orders": [],
+ }
+
+ mock_make_request = Mock(return_value=data)
+ monkeypatch.setattr(client, 'make_request', mock_make_request)
+
+ # this should not raise any exceptions
+ client.make_reservation()
+
+ # but this should
+ with pytest.raises(exceptions.OrderUnavailableError):
+ client.make_reservation(raise_on_unavailable_order=True)
+
def test_get_status(self, client, monkeypatch):
response = {
'trolley_contents': {
diff --git a/tests/test_reservation.py b/tests/test_reservation.py
index 91c0895..28bbf74 100644
--- a/tests/test_reservation.py
+++ b/tests/test_reservation.py
@@ -59,3 +59,13 @@ class TestReservation:
assert len(reservation.unreserved_orders) == 1
assert reservation.minutes_left == 15
+
+ def test_from_api_data_with_unavailable_orders(self):
+ data = {
+ "input_contained_unavailable_order": True,
+ "unreserved_orders": [],
+ }
+
+ reservation = Reservation.from_api_data(data)
+
+ assert reservation.input_contained_unavailable_order is True
diff --git a/tests/test_trolley.py b/tests/test_trolley.py
index fb9b9df..0370757 100644
--- a/tests/test_trolley.py
+++ b/tests/test_trolley.py
@@ -68,6 +68,23 @@ class TestTrolley:
assert trolley.discarded_orders[0].item == 3
assert trolley.discarded_orders[1].item == 6
+ def test_from_api_data_with_empty_trolley(self):
+ data = {
+ "discarded_orders": [],
+ "input_contained_unavailable_order": True,
+ "trolley_token": "abc123",
+ "trolley_token_contents": {
+ "trolley_bundle_count": 0,
+ "trolley_order_count": 0
+ }
+ }
+
+ trolley = Trolley.from_api_data(data)
+
+ assert trolley.token == 'abc123'
+ assert trolley.input_contained_unavailable_order is True
+
+
def test_get_events(self):
event_one = Event(id_='abc123')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 4
} | 2.3 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"flake8",
"pylint",
"pytest",
"behave"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements/test.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | astroid==2.11.7
attrs==22.2.0
behave==1.2.6
certifi==2021.5.30
coverage==6.2
dill==0.3.4
distlib==0.3.9
filelock==3.4.1
flake8==5.0.4
idna==3.10
importlib-metadata==1.7.0
importlib-resources==5.4.0
iniconfig==1.1.1
isort==5.10.1
lazy-object-proxy==1.7.1
mccabe==0.7.0
mock==5.2.0
multidict==5.2.0
packaging==21.3
parse==1.20.2
parse-type==0.6.0
platformdirs==2.4.0
pluggy==0.13.1
py==1.11.0
pycodestyle==2.9.1
pyflakes==2.5.0
PyHamcrest==2.1.0
pylint==2.13.9
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
-e git+https://github.com/ingresso-group/pyticketswitch.git@a22c4a3679174b1798acda89e59559930eb1f1a3#egg=pyticketswitch
PyYAML==6.0.1
requests==2.9.1
requests-mock==1.11.0
six==1.11.0
toml==0.10.2
tomli==1.2.3
tox==3.14.3
typed-ast==1.5.5
typing_extensions==4.1.1
vcrpy==4.1.1
virtualenv==20.16.2
wrapt==1.16.0
yarl==1.7.2
zipp==3.6.0
| name: pyticketswitch
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- astroid==2.11.7
- attrs==22.2.0
- behave==1.2.6
- coverage==6.2
- dill==0.3.4
- distlib==0.3.9
- filelock==3.4.1
- flake8==5.0.4
- idna==3.10
- importlib-metadata==1.7.0
- importlib-resources==5.4.0
- iniconfig==1.1.1
- isort==5.10.1
- lazy-object-proxy==1.7.1
- mccabe==0.7.0
- mock==5.2.0
- multidict==5.2.0
- packaging==21.3
- parse==1.20.2
- parse-type==0.6.0
- platformdirs==2.4.0
- pluggy==0.13.1
- py==1.11.0
- pycodestyle==2.9.1
- pyflakes==2.5.0
- pyhamcrest==2.1.0
- pylint==2.13.9
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pyyaml==6.0.1
- requests==2.9.1
- requests-mock==1.11.0
- six==1.11.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.14.3
- typed-ast==1.5.5
- typing-extensions==4.1.1
- vcrpy==4.1.1
- virtualenv==20.16.2
- wrapt==1.16.0
- yarl==1.7.2
- zipp==3.6.0
prefix: /opt/conda/envs/pyticketswitch
| [
"tests/test_client.py::TestClient::test_get_trolley_with_unavailable_order",
"tests/test_client.py::TestClient::test_make_reservation_with_unavailable_order",
"tests/test_reservation.py::TestReservation::test_from_api_data_with_unavailable_orders",
"tests/test_trolley.py::TestTrolley::test_from_api_data_with_empty_trolley"
] | [] | [
"tests/test_client.py::TestClient::test_get_url",
"tests/test_client.py::TestClient::test_make_request",
"tests/test_client.py::TestClient::test_make_request_with_timeout",
"tests/test_client.py::TestClient::test_make_request_with_post",
"tests/test_client.py::TestClient::test_make_request_with_subuser",
"tests/test_client.py::TestClient::test_make_request_with_tracking_id",
"tests/test_client.py::TestClient::test_make_request_when_using_per_request_tracking_id",
"tests/test_client.py::TestClient::test_make_request_bad_response_with_auth_error",
"tests/test_client.py::TestClient::test_make_request_bad_response_with_error",
"tests/test_client.py::TestClient::test_make_request_bad_response_without_error",
"tests/test_client.py::TestClient::test_make_request_410_gone_response",
"tests/test_client.py::TestClient::test_make_request_no_contents_raises",
"tests/test_client.py::TestClient::test_add_optional_kwargs_extra_info",
"tests/test_client.py::TestClient::test_add_optional_kwargs_reviews",
"tests/test_client.py::TestClient::test_add_optional_kwargs_media",
"tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range",
"tests/test_client.py::TestClient::test_add_optional_kwargs_best_value_offer",
"tests/test_client.py::TestClient::test_add_optional_kwargs_max_saving_offer",
"tests/test_client.py::TestClient::test_add_optional_kwargs_min_cost_offer",
"tests/test_client.py::TestClient::test_add_optional_kwargs_top_price_offer",
"tests/test_client.py::TestClient::test_add_optional_kwargs_no_singles_data",
"tests/test_client.py::TestClient::test_add_optional_kwargs_cost_range_details",
"tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details",
"tests/test_client.py::TestClient::test_add_optional_kwargs_avail_details_with_perfs",
"tests/test_client.py::TestClient::test_add_optional_kwargs_source_info",
"tests/test_client.py::TestClient::test_list_events",
"tests/test_client.py::TestClient::test_list_events_with_keywords",
"tests/test_client.py::TestClient::test_list_events_with_start_date",
"tests/test_client.py::TestClient::test_list_events_with_end_date",
"tests/test_client.py::TestClient::test_list_events_with_start_and_end_date",
"tests/test_client.py::TestClient::test_list_events_country_code",
"tests/test_client.py::TestClient::test_list_events_city_code",
"tests/test_client.py::TestClient::test_list_events_geolocation",
"tests/test_client.py::TestClient::test_list_events_invalid_geolocation",
"tests/test_client.py::TestClient::test_list_events_include_dead",
"tests/test_client.py::TestClient::test_list_events_sort_order",
"tests/test_client.py::TestClient::test_list_events_pagination",
"tests/test_client.py::TestClient::test_list_events_no_results",
"tests/test_client.py::TestClient::test_list_events_misc_kwargs",
"tests/test_client.py::TestClient::test_get_events",
"tests/test_client.py::TestClient::test_get_events_event_list",
"tests/test_client.py::TestClient::test_get_events_no_results",
"tests/test_client.py::TestClient::test_get_events_misc_kwargs",
"tests/test_client.py::TestClient::test_get_events_with_upsell",
"tests/test_client.py::TestClient::test_get_events_with_addons",
"tests/test_client.py::TestClient::test_get_event",
"tests/test_client.py::TestClient::test_get_months",
"tests/test_client.py::TestClient::test_get_months_no_results",
"tests/test_client.py::TestClient::test_get_months_misc_kwargs",
"tests/test_client.py::TestClient::test_list_performances_no_results",
"tests/test_client.py::TestClient::test_list_performances",
"tests/test_client.py::TestClient::test_list_performances_cost_range",
"tests/test_client.py::TestClient::test_list_performances_best_value_offer",
"tests/test_client.py::TestClient::test_list_performances_max_saving_offer",
"tests/test_client.py::TestClient::test_list_performances_min_cost_offer",
"tests/test_client.py::TestClient::test_list_performances_top_price_offer",
"tests/test_client.py::TestClient::test_list_performances_no_singles_data",
"tests/test_client.py::TestClient::test_list_performances_availability",
"tests/test_client.py::TestClient::test_list_performances_pagination",
"tests/test_client.py::TestClient::test_list_performances_with_start_date",
"tests/test_client.py::TestClient::test_list_performancess_with_end_date",
"tests/test_client.py::TestClient::test_list_performances_with_start_and_end_date",
"tests/test_client.py::TestClient::test_list_performances_misc_kwargs",
"tests/test_client.py::TestClient::test_get_performances",
"tests/test_client.py::TestClient::test_get_performances_no_performances",
"tests/test_client.py::TestClient::test_get_performances_misc_kwargs",
"tests/test_client.py::TestClient::test_get_performance",
"tests/test_client.py::TestClient::test_get_availability",
"tests/test_client.py::TestClient::test_get_availability_with_number_of_seats",
"tests/test_client.py::TestClient::test_get_availability_with_discounts",
"tests/test_client.py::TestClient::test_get_availability_with_example_seats",
"tests/test_client.py::TestClient::test_get_availability_with_seat_blocks",
"tests/test_client.py::TestClient::test_get_availability_with_user_commission",
"tests/test_client.py::TestClient::test_get_availability_no_availability",
"tests/test_client.py::TestClient::test_get_send_methods",
"tests/test_client.py::TestClient::test_get_send_methods_bad_data",
"tests/test_client.py::TestClient::test_get_discounts",
"tests/test_client.py::TestClient::test_get_discounts_bad_data",
"tests/test_client.py::TestClient::test_trolley_params_with_trolley_token",
"tests/test_client.py::TestClient::test_trolley_params_with_performance_id",
"tests/test_client.py::TestClient::test_trolley_params_with_number_of_seats",
"tests/test_client.py::TestClient::test_trolley_params_with_ticket_type_code",
"tests/test_client.py::TestClient::test_trolley_params_with_price_band_code",
"tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove",
"tests/test_client.py::TestClient::test_trolley_params_with_item_numbers_to_remove_with_no_token",
"tests/test_client.py::TestClient::test_trolley_params_with_seats",
"tests/test_client.py::TestClient::test_trolley_params_with_discounts",
"tests/test_client.py::TestClient::test_trolley_params_with_send_codes",
"tests/test_client.py::TestClient::test_trolley_params_with_invalid_send_codes",
"tests/test_client.py::TestClient::test_get_trolley",
"tests/test_client.py::TestClient::test_get_upsells",
"tests/test_client.py::TestClient::test_get_addons",
"tests/test_client.py::TestClient::test_make_reservation",
"tests/test_client.py::TestClient::test_get_status",
"tests/test_client.py::TestClient::test_get_status_with_trans",
"tests/test_client.py::TestClient::test_test",
"tests/test_client.py::TestClient::test_release_reservation",
"tests/test_client.py::TestClient::test_make_purchase_card_details",
"tests/test_client.py::TestClient::test_make_purchase_redirection",
"tests/test_client.py::TestClient::test_make_purchase_credit",
"tests/test_client.py::TestClient::test_make_purchase_opting_out_of_confirmation_email",
"tests/test_client.py::TestClient::test_next_callout",
"tests/test_client.py::TestClient::test_next_callout_with_additional_callout",
"tests/test_client.py::TestClient::test_auth_can_be_overridden_with_subclass",
"tests/test_client.py::TestClient::test_extra_params_can_be_overriden_by_subclass",
"tests/test_client.py::TestClient::test_get_auth_params_raises_deprecation_warning",
"tests/test_client.py::TestClient::test_make_request_using_decimal_parsing",
"tests/test_client.py::TestClient::test_make_request_using_float_parsing",
"tests/test_reservation.py::TestReservation::test_from_api_data",
"tests/test_trolley.py::TestTrolley::test_from_api_data_with_trolley_data",
"tests/test_trolley.py::TestTrolley::test_from_api_data_with_reservation_data",
"tests/test_trolley.py::TestTrolley::test_get_events",
"tests/test_trolley.py::TestTrolley::test_get_events_with_no_bundles",
"tests/test_trolley.py::TestTrolley::test_get_event_ids",
"tests/test_trolley.py::TestTrolley::test_get_bundle",
"tests/test_trolley.py::TestTrolley::test_get_bundle_when_none",
"tests/test_trolley.py::TestTrolley::test_get_bundle_when_no_match",
"tests/test_trolley.py::TestTrolley::test_get_item",
"tests/test_trolley.py::TestTrolley::test_get_orders"
] | [] | MIT License | 2,597 | 2,027 | [
"pyticketswitch/client.py",
"pyticketswitch/exceptions.py",
"pyticketswitch/reservation.py",
"pyticketswitch/trolley.py"
] |
capitalone__datacompy-18 | 370f7efbe1a5206c525a6da40410442a4ce8d51c | 2018-05-30 04:59:48 | 246aad8c381f7591512f6ecef9debf6341261578 | diff --git a/datacompy/core.py b/datacompy/core.py
index 7fc296e..e03d75e 100644
--- a/datacompy/core.py
+++ b/datacompy/core.py
@@ -59,6 +59,8 @@ class Compare(object):
more easily track the dataframes.
df2_name : str, optional
A string name for the second dataframe
+ ignore_spaces : bool, optional
+ Flag to strip whitespace (including newlines) from string columns
Attributes
----------
@@ -70,7 +72,7 @@ class Compare(object):
def __init__(
self, df1, df2, join_columns=None, on_index=False, abs_tol=0,
- rel_tol=0, df1_name='df1', df2_name='df2'):
+ rel_tol=0, df1_name='df1', df2_name='df2', ignore_spaces=False):
if on_index and join_columns is not None:
raise Exception('Only provide on_index or join_columns')
@@ -93,7 +95,7 @@ class Compare(object):
self.rel_tol = rel_tol
self.df1_unq_rows = self.df2_unq_rows = self.intersect_rows = None
self.column_stats = []
- self._compare()
+ self._compare(ignore_spaces)
@property
def df1(self):
@@ -143,7 +145,7 @@ class Compare(object):
if len(dataframe.drop_duplicates(subset=self.join_columns)) < len(dataframe):
self._any_dupes = True
- def _compare(self):
+ def _compare(self, ignore_spaces):
"""Actually run the comparison. This tries to run df1.equals(df2)
first so that if they're truly equal we can tell.
@@ -167,8 +169,8 @@ class Compare(object):
LOG.info('Number of columns in df2 and not in df1: {}'.format(
len(self.df2_unq_columns())))
LOG.debug('Merging dataframes')
- self._dataframe_merge()
- self._intersect_compare()
+ self._dataframe_merge(ignore_spaces)
+ self._intersect_compare(ignore_spaces)
if self.matches():
LOG.info('df1 matches df2')
else:
@@ -186,7 +188,7 @@ class Compare(object):
"""Get columns that are shared between the two dataframes"""
return set(self.df1.columns) & set(self.df2.columns)
- def _dataframe_merge(self):
+ def _dataframe_merge(self, ignore_spaces):
"""Merge df1 to df2 on the join columns, to get df1 - df2, df2 - df1
and df1 & df2
@@ -262,7 +264,7 @@ class Compare(object):
'Number of rows in df1 and df2 (not necessarily equal): {}'.format(
len(self.intersect_rows)))
- def _intersect_compare(self):
+ def _intersect_compare(self, ignore_spaces):
"""Run the comparison on the intersect dataframe
This loops through all columns that are shared between df1 and df2, and
@@ -285,7 +287,8 @@ class Compare(object):
self.intersect_rows[col_1],
self.intersect_rows[col_2],
self.rel_tol,
- self.abs_tol)
+ self.abs_tol,
+ ignore_spaces)
match_cnt = self.intersect_rows[col_match].sum()
try:
@@ -570,7 +573,7 @@ def render(filename, *fields):
return file_open.read().format(*fields)
-def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
+def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0, ignore_spaces=False):
"""Compares two columns from a dataframe, returning a True/False series,
with the same index as column 1.
@@ -592,6 +595,8 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
Relative tolerance
abs_tol : float, optional
Absolute tolerance
+ ignore_spaces : bool, optional
+ Flag to strip whitespace (including newlines) from string columns
Returns
-------
@@ -616,6 +621,12 @@ def columns_equal(col_1, col_2, rel_tol=0, abs_tol=0):
equal_nan=True))
except (ValueError, TypeError):
try:
+ if ignore_spaces:
+ if col_1.dtype.kind == 'O':
+ col_1 = col_1.str.strip()
+ if col_2.dtype.kind == 'O':
+ col_2 = col_2.str.strip()
+
if set([col_1.dtype.kind, col_2.dtype.kind]) == set(['M','O']):
compare = compare_string_and_date_columns(col_1, col_2)
else:
| Would be useful to have a parameter to strip spaces for comparison
As probably expected, the following code will return a mismatch since 'B'<>'B ':
```
import pandas as pd
import datacompy
df1 = pd.DataFrame([
{'id': 1234, 'column_value': 'A'},
{'id': 2345, 'column_value': 'B'}])
df2 = pd.DataFrame([
{'id': 1234, 'column_value': 'A'},
{'id': 2345, 'column_value': 'B '}])
compare = datacompy.Compare(
df1,
df2,
join_columns='id',
abs_tol=0,
rel_tol=0,
)
compare.matches(ignore_extra_columns=False)
# False
# This method prints out a human-readable report summarizing and sampling differences
print(compare.report())
```
What I propose is an optional parameter to ignore differences where the only difference is leading or trailing spaces. In this example it is obvious that there is a trailing space. However, when we are dealing with extracts from different databases/source files, without real control over the ETL of these, sometimes we can't prevent these discrepancies. We may wish to ignore these types of mismatches to identify 'worse' mismatches more effectively.
Another candidate could be ignoring case sensitivity differences.
Of course these could both be easily handled with preprocessing the dataframes, but still could be some convenient enhancements! | capitalone/datacompy | diff --git a/tests/test_core.py b/tests/test_core.py
index d236427..f3e8437 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -85,6 +85,28 @@ something||False
assert_series_equal(expect_out, actual_out, check_names=False)
+def test_string_columns_equal_with_ignore_spaces():
+ data = '''a|b|expected
+Hi|Hi|True
+Yo|Yo|True
+Hey|Hey |True
+résumé|resume|False
+résumé|résumé|True
+💩|💩|True
+💩|🤔|False
+ | |True
+ | |True
+datacompy|DataComPy|False
+something||False
+|something|False
+||True'''
+ df = pd.read_csv(six.StringIO(data), sep='|')
+ actual_out = datacompy.columns_equal(
+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)
+ expect_out = df['expected']
+ assert_series_equal(expect_out, actual_out, check_names=False)
+
+
def test_date_columns_equal():
data = '''a|b|expected
2017-01-01|2017-01-01|True
@@ -110,6 +132,34 @@ def test_date_columns_equal():
assert_series_equal(expect_out, actual_out_rev, check_names=False)
+def test_date_columns_equal_with_ignore_spaces():
+ data = '''a|b|expected
+2017-01-01|2017-01-01 |True
+2017-01-02 |2017-01-02|True
+2017-10-01 |2017-10-10 |False
+2017-01-01||False
+|2017-01-01|False
+||True'''
+ df = pd.read_csv(six.StringIO(data), sep='|')
+ #First compare just the strings
+ actual_out = datacompy.columns_equal(
+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)
+ expect_out = df['expected']
+ assert_series_equal(expect_out, actual_out, check_names=False)
+
+ #Then compare converted to datetime objects
+ df['a'] = pd.to_datetime(df['a'])
+ df['b'] = pd.to_datetime(df['b'])
+ actual_out = datacompy.columns_equal(
+ df.a, df.b, rel_tol=0.2, ignore_spaces=True)
+ expect_out = df['expected']
+ assert_series_equal(expect_out, actual_out, check_names=False)
+ #and reverse
+ actual_out_rev = datacompy.columns_equal(
+ df.b, df.a, rel_tol=0.2, ignore_spaces=True)
+ assert_series_equal(expect_out, actual_out_rev, check_names=False)
+
+
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
@@ -250,6 +300,20 @@ def test_mixed_column():
assert_series_equal(expect_out, actual_out, check_names=False)
+def test_mixed_column_with_ignore_spaces():
+ df = pd.DataFrame([
+ {'a': 'hi', 'b': 'hi ', 'expected': True},
+ {'a': 1, 'b': 1, 'expected': True},
+ {'a': np.inf, 'b': np.inf, 'expected': True},
+ {'a': Decimal('1'), 'b': Decimal('1'), 'expected': True},
+ {'a': 1, 'b': '1 ', 'expected': False},
+ {'a': 1, 'b': 'yo ', 'expected': False}
+ ])
+ actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
+ expect_out = df['expected']
+ assert_series_equal(expect_out, actual_out, check_names=False)
+
+
def test_compare_df_setter_bad():
df = pd.DataFrame([{'a': 1, 'A': 2}, {'a': 2, 'A': 2}])
with raises(TypeError, message='df1 must be a pandas DataFrame'):
@@ -565,4 +629,52 @@ def test_dupes_from_real_data():
assert compare_unq.matches()
#Just render the report to make sure it renders.
t = compare_acct.report()
- r = compare_unq.report()
\ No newline at end of file
+ r = compare_unq.report()
+
+
+def test_strings_with_joins_with_ignore_spaces():
+ df1 = pd.DataFrame([{'a': 'hi', 'b': ' A'}, {'a': 'bye', 'b': 'A'}])
+ df2 = pd.DataFrame([{'a': 'hi', 'b': 'A'}, {'a': 'bye', 'b': 'A '}])
+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=False)
+ assert not compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert not compare.intersect_rows_match()
+
+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)
+ assert compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert compare.intersect_rows_match()
+
+
+def test_decimal_with_joins_with_ignore_spaces():
+ df1 = pd.DataFrame([{'a': 1, 'b': ' A'}, {'a': 2, 'b': 'A'}])
+ df2 = pd.DataFrame([{'a': 1, 'b': 'A'}, {'a': 2, 'b': 'A '}])
+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=False)
+ assert not compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert not compare.intersect_rows_match()
+
+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)
+ assert compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert compare.intersect_rows_match()
+
+
+def test_index_with_joins_with_ignore_spaces():
+ df1 = pd.DataFrame([{'a': 1, 'b': ' A'}, {'a': 2, 'b': 'A'}])
+ df2 = pd.DataFrame([{'a': 1, 'b': 'A'}, {'a': 2, 'b': 'A '}])
+ compare = datacompy.Compare(df1, df2, on_index=True, ignore_spaces=False)
+ assert not compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert not compare.intersect_rows_match()
+
+ compare = datacompy.Compare(df1, df2, 'a', ignore_spaces=True)
+ assert compare.matches()
+ assert compare.all_columns_match()
+ assert compare.all_rows_overlap()
+ assert compare.intersect_rows_match()
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 1
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest>=3.0.6"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
-e git+https://github.com/capitalone/datacompy.git@370f7efbe1a5206c525a6da40410442a4ce8d51c#egg=datacompy
importlib-metadata==4.8.3
iniconfig==1.1.1
numpy==1.19.5
packaging==21.3
pandas==1.1.5
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pytest==7.0.1
python-dateutil==2.9.0.post0
pytz==2025.2
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: datacompy
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- numpy==1.19.5
- packaging==21.3
- pandas==1.1.5
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pytest==7.0.1
- python-dateutil==2.9.0.post0
- pytz==2025.2
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/datacompy
| [
"tests/test_core.py::test_string_columns_equal_with_ignore_spaces",
"tests/test_core.py::test_date_columns_equal_with_ignore_spaces",
"tests/test_core.py::test_mixed_column_with_ignore_spaces",
"tests/test_core.py::test_strings_with_joins_with_ignore_spaces",
"tests/test_core.py::test_decimal_with_joins_with_ignore_spaces",
"tests/test_core.py::test_index_with_joins_with_ignore_spaces"
] | [
"tests/test_core.py::test_compare_df_setter_bad",
"tests/test_core.py::test_compare_df_setter_bad_index",
"tests/test_core.py::test_compare_on_index_and_join_columns",
"tests/test_core.py::test_simple_dupes_index"
] | [
"tests/test_core.py::test_numeric_columns_equal_abs",
"tests/test_core.py::test_numeric_columns_equal_rel",
"tests/test_core.py::test_string_columns_equal",
"tests/test_core.py::test_date_columns_equal",
"tests/test_core.py::test_date_columns_unequal",
"tests/test_core.py::test_bad_date_columns",
"tests/test_core.py::test_rounded_date_columns",
"tests/test_core.py::test_decimal_float_columns_equal",
"tests/test_core.py::test_decimal_float_columns_equal_rel",
"tests/test_core.py::test_decimal_columns_equal",
"tests/test_core.py::test_decimal_columns_equal_rel",
"tests/test_core.py::test_infinity_and_beyond",
"tests/test_core.py::test_mixed_column",
"tests/test_core.py::test_compare_df_setter_good",
"tests/test_core.py::test_compare_df_setter_different_cases",
"tests/test_core.py::test_compare_df_setter_good_index",
"tests/test_core.py::test_columns_overlap",
"tests/test_core.py::test_columns_no_overlap",
"tests/test_core.py::test_10k_rows",
"tests/test_core.py::test_subset",
"tests/test_core.py::test_not_subset",
"tests/test_core.py::test_large_subset",
"tests/test_core.py::test_string_joiner",
"tests/test_core.py::test_decimal_with_joins",
"tests/test_core.py::test_decimal_with_nulls",
"tests/test_core.py::test_strings_with_joins",
"tests/test_core.py::test_index_joining",
"tests/test_core.py::test_index_joining_strings_i_guess",
"tests/test_core.py::test_index_joining_non_overlapping",
"tests/test_core.py::test_temp_column_name",
"tests/test_core.py::test_temp_column_name_one_has",
"tests/test_core.py::test_temp_column_name_both_have",
"tests/test_core.py::test_temp_column_name_one_already",
"tests/test_core.py::test_simple_dupes_one_field",
"tests/test_core.py::test_simple_dupes_two_fields",
"tests/test_core.py::test_simple_dupes_one_field_two_vals",
"tests/test_core.py::test_simple_dupes_one_field_three_to_two_vals",
"tests/test_core.py::test_dupes_from_real_data"
] | [] | Apache License 2.0 | 2,598 | 1,126 | [
"datacompy/core.py"
] |
|
peterbe__hashin-65 | bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0 | 2018-05-30 21:13:24 | bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0 | diff --git a/hashin.py b/hashin.py
index c1bb79b..1590560 100755
--- a/hashin.py
+++ b/hashin.py
@@ -58,6 +58,11 @@ parser.add_argument(
help='Verbose output',
action='store_true',
)
+parser.add_argument(
+ '--include-prereleases',
+ help='Include pre-releases (off by default)',
+ action='store_true',
+)
parser.add_argument(
'-p', '--python-version',
help='Python version to add wheels for. May be used multiple times.',
@@ -83,6 +88,10 @@ class PackageError(Exception):
pass
+class NoVersionsError(Exception):
+ """When there are no valid versions found."""
+
+
def _verbose(*args):
print('* ' + ' '.join(args))
@@ -127,6 +136,7 @@ def run_single_package(
algorithm,
python_versions=None,
verbose=False,
+ include_prereleases=False,
):
restriction = None
if ';' in spec:
@@ -143,7 +153,8 @@ def run_single_package(
version=version,
verbose=verbose,
python_versions=python_versions,
- algorithm=algorithm
+ algorithm=algorithm,
+ include_prereleases=include_prereleases,
)
package = data['package']
@@ -202,7 +213,7 @@ def amend_requirements_content(requirements, package, new_lines):
return requirements
-def get_latest_version(data):
+def get_latest_version(data, include_prereleases):
"""
Return the version string of what we think is the latest version.
In the data blob from PyPI there is the info->version key which
@@ -214,11 +225,22 @@ def get_latest_version(data):
# This feels kinda strange but it has worked for years
return data['info']['version']
all_versions = []
+ count_prereleases = 0
for version in data['releases']:
v = parse(version)
- if not v.is_prerelease:
+ if not v.is_prerelease or include_prereleases:
all_versions.append((v, version))
+ else:
+ count_prereleases += 1
all_versions.sort(reverse=True)
+ if not all_versions:
+ msg = "Not a single valid version found."
+ if not include_prereleases and count_prereleases:
+ msg += (
+ " But, found {0} pre-releases. Consider running again "
+ "with the --include-prereleases flag."
+ )
+ raise NoVersionsError(msg)
# return the highest non-pre-release version
return str(all_versions[0][1])
@@ -378,6 +400,7 @@ def get_package_hashes(
algorithm=DEFAULT_ALGORITHM,
python_versions=(),
verbose=False,
+ include_prereleases=False,
):
"""
Gets the hashes for the given package.
@@ -404,7 +427,7 @@ def get_package_hashes(
"""
data = get_package_data(package, verbose)
if not version:
- version = get_latest_version(data)
+ version = get_latest_version(data, include_prereleases)
assert version
if verbose:
_verbose('Latest version for {0} is {1}'.format(
@@ -472,6 +495,7 @@ def main():
args.algorithm,
args.python_version,
verbose=args.verbose,
+ include_prereleases=args.include_prereleases,
)
except PackageError as exception:
print(str(exception), file=sys.stderr)
| `hashin black` fails
```
▶ hashin black
Traceback (most recent call last):
File "/usr/local/bin/hashin", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 474, in main
verbose=args.verbose,
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 120, in run
run_single_package(spec, *args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 146, in run_single_package
algorithm=algorithm
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 407, in get_package_hashes
version = get_latest_version(data)
File "/usr/local/lib/python3.6/site-packages/hashin.py", line 223, in get_latest_version
return str(all_versions[0][1])
IndexError: list index out of range
```
| peterbe/hashin | diff --git a/tests/test_arg_parse.py b/tests/test_arg_parse.py
index 7d00e74..a6b6236 100644
--- a/tests/test_arg_parse.py
+++ b/tests/test_arg_parse.py
@@ -18,6 +18,7 @@ def test_everything():
requirements_file='reqs.txt',
verbose=True,
version=False,
+ include_prereleases=False,
)
assert args == (expected, [])
@@ -37,6 +38,7 @@ def test_everything_long():
requirements_file='reqs.txt',
verbose=True,
version=False,
+ include_prereleases=False,
)
assert args == (expected, [])
@@ -50,5 +52,6 @@ def test_minimal():
requirements_file='requirements.txt',
verbose=False,
version=False,
+ include_prereleases=False,
)
assert args == (expected, [])
diff --git a/tests/test_cli.py b/tests/test_cli.py
index c0b566f..72cf2de 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -73,7 +73,10 @@ class Tests(TestCase):
@mock.patch('hashin.urlopen')
def test_get_latest_version_simple(self, murlopen):
- version = hashin.get_latest_version({'info': {'version': '0.3'}})
+ version = hashin.get_latest_version(
+ {'info': {'version': '0.3'}},
+ False
+ )
self.assertEqual(version, '0.3')
@mock.patch('hashin.urlopen')
@@ -91,9 +94,43 @@ class Tests(TestCase):
'2.0b2': {},
'2.0c3': {},
}
- })
+ }, False)
self.assertEqual(version, '0.999')
+ @mock.patch('hashin.urlopen')
+ def test_get_latest_version_only_pre_release(self, murlopen):
+ self.assertRaises(
+ hashin.NoVersionsError,
+ hashin.get_latest_version,
+ {
+ 'info': {
+ 'version': '0.3',
+ },
+ 'releases': {
+ '1.1.0rc1': {},
+ '1.1rc1': {},
+ '1.0a1': {},
+ '2.0b2': {},
+ '2.0c3': {},
+ }
+ },
+ False,
+ )
+
+ version = hashin.get_latest_version({
+ 'info': {
+ 'version': '0.3',
+ },
+ 'releases': {
+ '1.1.0rc1': {},
+ '1.1rc1': {},
+ '1.0a1': {},
+ '2.0b2': {},
+ '2.0c3': {},
+ }
+ }, True)
+ self.assertEqual(version, '2.0c3')
+
@mock.patch('hashin.urlopen')
def test_get_latest_version_non_pre_release_leading_zeros(self, murlopen):
version = hashin.get_latest_version({
@@ -105,7 +142,7 @@ class Tests(TestCase):
'0.04.21': {},
'0.04.09': {},
}
- })
+ }, False)
self.assertEqual(version, '0.04.21')
@mock.patch('hashin.urlopen')
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup==1.2.2
-e git+https://github.com/peterbe/hashin.git@bbe0b6c379e25fbd8d3e702473e8e29677ccd9c0#egg=hashin
iniconfig==2.1.0
mock==5.2.0
packaging==24.2
pip-api==0.0.34
pluggy==1.5.0
pytest==8.3.5
tomli==2.2.1
| name: hashin
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- exceptiongroup==1.2.2
- iniconfig==2.1.0
- mock==5.2.0
- packaging==24.2
- pip-api==0.0.34
- pluggy==1.5.0
- pytest==8.3.5
- tomli==2.2.1
prefix: /opt/conda/envs/hashin
| [
"tests/test_arg_parse.py::test_everything",
"tests/test_arg_parse.py::test_everything_long",
"tests/test_arg_parse.py::test_minimal",
"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release",
"tests/test_cli.py::Tests::test_get_latest_version_non_pre_release_leading_zeros",
"tests/test_cli.py::Tests::test_get_latest_version_only_pre_release",
"tests/test_cli.py::Tests::test_get_latest_version_simple"
] | [] | [
"tests/test_cli.py::Tests::test_amend_requirements_content_new",
"tests/test_cli.py::Tests::test_amend_requirements_content_new_similar_name",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_2",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_amonst_others_2",
"tests/test_cli.py::Tests::test_amend_requirements_content_replacement_single_to_multi",
"tests/test_cli.py::Tests::test_expand_python_version",
"tests/test_cli.py::Tests::test_filter_releases",
"tests/test_cli.py::Tests::test_get_hashes_error",
"tests/test_cli.py::Tests::test_get_package_hashes",
"tests/test_cli.py::Tests::test_get_package_hashes_unknown_algorithm",
"tests/test_cli.py::Tests::test_get_package_hashes_without_version",
"tests/test_cli.py::Tests::test_main_packageerrors_stderr",
"tests/test_cli.py::Tests::test_main_version",
"tests/test_cli.py::Tests::test_non_200_ok_download",
"tests/test_cli.py::Tests::test_release_url_metadata_python",
"tests/test_cli.py::Tests::test_run",
"tests/test_cli.py::Tests::test_run_case_insensitive",
"tests/test_cli.py::Tests::test_run_contained_names",
"tests/test_cli.py::Tests::test_run_pep_0496",
"tests/test_cli.py::Tests::test_run_without_specific_version"
] | [] | MIT License | 2,603 | 828 | [
"hashin.py"
] |
|
HXLStandard__libhxl-python-174 | 1bc7e92a3844dd443f9e31f478357ea7b599c831 | 2018-05-30 21:52:05 | 1bc7e92a3844dd443f9e31f478357ea7b599c831 | diff --git a/hxl/validation.py b/hxl/validation.py
index 3eb1eba..922bfaf 100644
--- a/hxl/validation.py
+++ b/hxl/validation.py
@@ -1508,11 +1508,6 @@ def validate(data, schema=None):
issue_map = dict()
- def make_rule_hash(rule):
- """Make a good-enough hash for a rule."""
- s = "\r".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])
- return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')
-
def add_issue(issue):
hash = make_rule_hash(issue.rule)
issue_map.setdefault(hash, []).append(issue)
@@ -1562,9 +1557,10 @@ def make_json_report(status, issue_map, schema_url=None, data_url=None):
# add the issue objects
for rule_id, locations in issue_map.items():
- json_report['stats']['total'] += len(locations)
- json_report['stats'][locations[0].rule.severity] += len(locations)
- json_report['issues'].append(make_json_issue(rule_id, locations))
+ json_issue = make_json_issue(rule_id, locations)
+ json_report['stats']['total'] += len(json_issue['locations'])
+ json_report['stats'][locations[0].rule.severity] += len(json_issue['locations'])
+ json_report['issues'].append(json_issue)
return json_report
@@ -1581,6 +1577,15 @@ def make_json_issue(rule_id, locations):
if not description:
description = model.message
+ # get all unique locations
+ location_keys = set()
+ json_locations = []
+ for location in locations:
+ location_key = (location.row.row_number, location.column.column_number, location.value, location.suggested_value,)
+ if not location_key in location_keys:
+ json_locations.append(make_json_location(location))
+ location_keys.add(location_key)
+
# make the issue
json_issue = {
"rule_id": rule_id,
@@ -1589,7 +1594,7 @@ def make_json_issue(rule_id, locations):
"severity": model.rule.severity,
"location_count": len(locations),
"scope": model.scope,
- "locations": [make_json_location(location) for location in locations]
+ "locations": json_locations
}
return json_issue
@@ -1622,4 +1627,10 @@ def make_json_location(location):
return json_location
+
+def make_rule_hash(rule):
+ """Make a good-enough hash for a rule."""
+ s = "\r".join([str(rule.severity), str(rule.description), str(rule.tag_pattern)])
+ return base64.urlsafe_b64encode(hashlib.md5(s.encode('utf-8')).digest())[:8].decode('ascii')
+
# end
| Double counting of errors p-code adm name combination consistency errors
When I put the below into data check, I get 2 of every cell eg. F3,F3,F4,F4,F5,F5...
https://data.humdata.org/dataset/77c97850-4004-4285-94db-0b390a962d6e/resource/d6c0dbac-683d-42d7-82b4-a6379bd4f48e/download/mrt_population_statistics_ons_rgph_2013_2017.xlsx | HXLStandard/libhxl-python | diff --git a/tests/test_validation.py b/tests/test_validation.py
index 43ab00b..7e8b4f3 100644
--- a/tests/test_validation.py
+++ b/tests/test_validation.py
@@ -655,8 +655,8 @@ class TestValidateDataset(unittest.TestCase):
def test_double_correlation(self):
"""Test correlation when more than one column has same tagspec"""
SCHEMA = [
- ['#valid_tag', '#valid_correlation'],
- ['#adm1+code', '#adm1+name']
+ ['#valid_tag', '#description', '#valid_correlation', '#valid_value+list'],
+ ['#adm1+code', 'xxxxx', '#adm1+name', 'X001|X002']
]
DATASET = [
['#adm1+name', '#adm1+code', '#adm1+code'],
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 4.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
-e git+https://github.com/HXLStandard/libhxl-python.git@1bc7e92a3844dd443f9e31f478357ea7b599c831#egg=libhxl
packaging==24.2
pluggy==1.5.0
pytest==8.3.5
python-dateutil==2.9.0.post0
python-io-wrapper==0.3.1
requests==2.32.3
six==1.17.0
tomli==2.2.1
Unidecode==1.3.8
urllib3==2.3.0
xlrd==2.0.1
| name: libhxl-python
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- python-dateutil==2.9.0.post0
- python-io-wrapper==0.3.1
- requests==2.32.3
- six==1.17.0
- tomli==2.2.1
- unidecode==1.3.8
- urllib3==2.3.0
- xlrd==2.0.1
prefix: /opt/conda/envs/libhxl-python
| [
"tests/test_validation.py::TestValidateDataset::test_double_correlation"
] | [
"tests/test_validation.py::TestValidateDataset::test_default_schema",
"tests/test_validation.py::TestLoad::test_load_default",
"tests/test_validation.py::TestJSONSchema::test_truthy",
"tests/test_validation.py::TestJSONReport::test_default",
"tests/test_validation.py::TestJSONReport::test_top_level"
] | [
"tests/test_validation.py::TestTests::test_consistent_datatypes",
"tests/test_validation.py::TestTests::test_correlation",
"tests/test_validation.py::TestTests::test_datatype",
"tests/test_validation.py::TestTests::test_enumeration",
"tests/test_validation.py::TestTests::test_enumeration_suggested_values",
"tests/test_validation.py::TestTests::test_outliers",
"tests/test_validation.py::TestTests::test_range",
"tests/test_validation.py::TestTests::test_regex",
"tests/test_validation.py::TestTests::test_required",
"tests/test_validation.py::TestTests::test_spelling_case_insensitive",
"tests/test_validation.py::TestTests::test_spelling_case_sensitive",
"tests/test_validation.py::TestTests::test_unique_row",
"tests/test_validation.py::TestTests::test_unique_value",
"tests/test_validation.py::TestTests::test_whitespace",
"tests/test_validation.py::TestRule::test_datatype",
"tests/test_validation.py::TestRule::test_range",
"tests/test_validation.py::TestRule::test_regex",
"tests/test_validation.py::TestRule::test_row_restrictions",
"tests/test_validation.py::TestRule::test_value_enumeration",
"tests/test_validation.py::TestRule::test_whitespace",
"tests/test_validation.py::TestValidateColumns::test_bad_value_url",
"tests/test_validation.py::TestValidateColumns::test_min_occurs",
"tests/test_validation.py::TestValidateColumns::test_required",
"tests/test_validation.py::TestValidateRow::test_date",
"tests/test_validation.py::TestValidateRow::test_email",
"tests/test_validation.py::TestValidateRow::test_minmax",
"tests/test_validation.py::TestValidateRow::test_number",
"tests/test_validation.py::TestValidateRow::test_url",
"tests/test_validation.py::TestValidateDataset::test_consistent_datatype",
"tests/test_validation.py::TestValidateDataset::test_correlation",
"tests/test_validation.py::TestValidateDataset::test_different_indicator_datatypes",
"tests/test_validation.py::TestValidateDataset::test_outliers",
"tests/test_validation.py::TestValidateDataset::test_spellings",
"tests/test_validation.py::TestValidateDataset::test_spellings_multiple",
"tests/test_validation.py::TestValidateDataset::test_suggested_value_correlation_key",
"tests/test_validation.py::TestValidateDataset::test_unique_compound",
"tests/test_validation.py::TestValidateDataset::test_unique_single",
"tests/test_validation.py::TestLoad::test_load_bad",
"tests/test_validation.py::TestLoad::test_load_good",
"tests/test_validation.py::TestJSONReport::test_errors"
] | [] | The Unlicense | 2,604 | 678 | [
"hxl/validation.py"
] |
|
graphql-python__graphene-752 | 332214ba9c545b6d899e181a34666540f02848fe | 2018-06-01 01:53:33 | f039af2810806ab42521426777b3a0d061b02802 | diff --git a/graphene/types/inputobjecttype.py b/graphene/types/inputobjecttype.py
index dbfccc4..b84fc0f 100644
--- a/graphene/types/inputobjecttype.py
+++ b/graphene/types/inputobjecttype.py
@@ -50,7 +50,10 @@ class InputObjectType(UnmountedType, BaseType):
yank_fields_from_attrs(base.__dict__, _as=InputField)
)
- _meta.fields = fields
+ if _meta.fields:
+ _meta.fields.update(fields)
+ else:
+ _meta.fields = fields
if container is None:
container = type(cls.__name__, (InputObjectTypeContainer, cls), {})
_meta.container = container
| InputObjectType.__init_sublcass_with_meta__ overwrites passed _meta.fields
In `InputObjectType.__init_subclass_with_meta__`, the`fields` of the `_meta` arg are overwritten, which can cause complications for subclassing.
@classmethod
def __init_subclass_with_meta__(cls, container=None, _meta=None, **options):
if not _meta:
_meta = InputObjectTypeOptions(cls)
fields = OrderedDict()
for base in reversed(cls.__mro__):
fields.update(
yank_fields_from_attrs(base.__dict__, _as=InputField)
)
_meta.fields = fields
# should this be:
# if _meta.fields:
# _meta.fields.update(fields)
# else:
# _meta.fields = fields
| graphql-python/graphene | diff --git a/graphene/tests/issues/test_720.py b/graphene/tests/issues/test_720.py
new file mode 100644
index 0000000..8cd99bd
--- /dev/null
+++ b/graphene/tests/issues/test_720.py
@@ -0,0 +1,44 @@
+# https://github.com/graphql-python/graphene/issues/720
+# InputObjectTypes overwrite the "fields" attribute of the provided
+# _meta object, so even if dynamic fields are provided with a standard
+# InputObjectTypeOptions, they are ignored.
+
+import graphene
+
+
+class MyInputClass(graphene.InputObjectType):
+
+ @classmethod
+ def __init_subclass_with_meta__(
+ cls, container=None, _meta=None, fields=None, **options):
+ if _meta is None:
+ _meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls)
+ _meta.fields = fields
+ super(MyInputClass, cls).__init_subclass_with_meta__(
+ container=container, _meta=_meta, **options)
+
+
+class MyInput(MyInputClass):
+
+ class Meta:
+ fields = dict(x=graphene.Field(graphene.Int))
+
+
+class Query(graphene.ObjectType):
+ myField = graphene.Field(graphene.String, input=graphene.Argument(MyInput))
+
+ def resolve_myField(parent, info, input):
+ return 'ok'
+
+
+def test_issue():
+ query_string = '''
+ query myQuery {
+ myField(input: {x: 1})
+ }
+ '''
+
+ schema = graphene.Schema(query=Query)
+ result = schema.execute(query_string)
+
+ assert not result.errors
| {
"commit_name": "head_commit",
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[test]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.6",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | aniso8601==3.0.2
attrs @ file:///opt/conda/conda-bld/attrs_1642510447205/work
certifi==2021.5.30
charset-normalizer==2.0.12
coverage==6.2
coveralls==3.3.1
docopt==0.6.2
fastdiff==0.3.0
-e git+https://github.com/graphql-python/graphene.git@332214ba9c545b6d899e181a34666540f02848fe#egg=graphene
graphql-core==2.3.2
graphql-relay==0.4.5
idna==3.10
importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1631916693255/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
iso8601==1.1.0
mock==5.2.0
more-itertools @ file:///tmp/build/80754af9/more-itertools_1637733554872/work
packaging @ file:///tmp/build/80754af9/packaging_1637314298585/work
pluggy @ file:///tmp/build/80754af9/pluggy_1615976315926/work
promise==2.3
py @ file:///opt/conda/conda-bld/py_1644396412707/work
py-cpuinfo==9.0.0
pyparsing @ file:///tmp/build/80754af9/pyparsing_1635766073266/work
pytest==6.2.4
pytest-benchmark==3.4.1
pytest-cov==4.0.0
pytest-mock==3.6.1
pytz==2025.2
requests==2.27.1
Rx==1.6.3
six==1.17.0
snapshottest==0.6.0
termcolor==1.1.0
toml @ file:///tmp/build/80754af9/toml_1616166611790/work
tomli==1.2.3
typing_extensions @ file:///opt/conda/conda-bld/typing_extensions_1647553014482/work
urllib3==1.26.20
wasmer==1.1.0
wasmer-compiler-cranelift==1.1.0
zipp @ file:///tmp/build/80754af9/zipp_1633618647012/work
| name: graphene
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- attrs=21.4.0=pyhd3eb1b0_0
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- importlib-metadata=4.8.1=py36h06a4308_0
- importlib_metadata=4.8.1=hd3eb1b0_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- more-itertools=8.12.0=pyhd3eb1b0_0
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- packaging=21.3=pyhd3eb1b0_0
- pip=21.2.2=py36h06a4308_0
- pluggy=0.13.1=py36h06a4308_0
- py=1.11.0=pyhd3eb1b0_0
- pyparsing=3.0.4=pyhd3eb1b0_0
- pytest=6.2.4=py36h06a4308_2
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- toml=0.10.2=pyhd3eb1b0_0
- typing_extensions=4.1.1=pyh06a4308_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zipp=3.6.0=pyhd3eb1b0_0
- zlib=1.2.13=h5eee18b_1
- pip:
- aniso8601==3.0.2
- charset-normalizer==2.0.12
- coverage==6.2
- coveralls==3.3.1
- docopt==0.6.2
- fastdiff==0.3.0
- graphql-core==2.3.2
- graphql-relay==0.4.5
- idna==3.10
- iso8601==1.1.0
- mock==5.2.0
- promise==2.3
- py-cpuinfo==9.0.0
- pytest-benchmark==3.4.1
- pytest-cov==4.0.0
- pytest-mock==3.6.1
- pytz==2025.2
- requests==2.27.1
- rx==1.6.3
- six==1.17.0
- snapshottest==0.6.0
- termcolor==1.1.0
- tomli==1.2.3
- urllib3==1.26.20
- wasmer==1.1.0
- wasmer-compiler-cranelift==1.1.0
prefix: /opt/conda/envs/graphene
| [
"graphene/tests/issues/test_720.py::test_issue"
] | [] | [] | [] | MIT License | 2,612 | 165 | [
"graphene/types/inputobjecttype.py"
] |
|
Yelp__swagger_spec_validator-95 | 40e1cc926775777ff2d56e271fd61697c6235579 | 2018-06-05 10:37:03 | 40e1cc926775777ff2d56e271fd61697c6235579 | diff --git a/swagger_spec_validator/validator20.py b/swagger_spec_validator/validator20.py
index fe17ded..002eb44 100644
--- a/swagger_spec_validator/validator20.py
+++ b/swagger_spec_validator/validator20.py
@@ -268,6 +268,15 @@ def validate_defaults_in_definition(definition_spec, deref):
validate_property_default(property_spec, deref)
+def validate_arrays_in_definition(definition_spec, def_name=None):
+ if definition_spec.get('type') == 'array' and 'items' not in definition_spec:
+ raise SwaggerValidationError(
+ 'Definition of type array must define `items` property{}.'.format(
+ '' if not def_name else ' (definition {})'.format(def_name),
+ ),
+ )
+
+
def validate_definition(definition, deref, def_name=None):
definition = deref(definition)
@@ -286,6 +295,7 @@ def validate_definition(definition, deref, def_name=None):
)
validate_defaults_in_definition(definition, deref)
+ validate_arrays_in_definition(definition, def_name=def_name)
if 'discriminator' in definition:
required_props, not_required_props = get_collapsed_properties_type_mappings(definition, deref)
| Spec validation will not fail if items is not present and type is array
The following specs are not valid according to [Swagger 2.0 Specs](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameter-object), editor.swagger.io and according to `swagger-tools` npm package.
```yaml
swagger: '2.0'
info:
title: Example
produces:
- application/json
paths:
/test:
get:
responses:
'200':
description: HTTP200
schema:
type: array
```
Error reported by editor.swagger.io

Error reported by npm
```
API Errors:
#/paths/~1test/get/responses/200/schema: Missing required property: items
1 error and 0 warnings
``` | Yelp/swagger_spec_validator | diff --git a/tests/validator20/validate_definitions_test.py b/tests/validator20/validate_definitions_test.py
index 0b61dc0..6c2b6aa 100644
--- a/tests/validator20/validate_definitions_test.py
+++ b/tests/validator20/validate_definitions_test.py
@@ -95,3 +95,30 @@ def test_api_check_default_fails(property_spec, validator, instance):
validation_error = excinfo.value.args[1]
assert validation_error.instance == instance
assert validation_error.validator == validator
+
+
+def test_type_array_with_items_succeed_validation():
+ definitions = {
+ 'definition_1': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ },
+ }
+
+ # Success if no exception are raised
+ validate_definitions(definitions, lambda x: x)
+
+
+def test_type_array_without_items_succeed_fails():
+ definitions = {
+ 'definition_1': {
+ 'type': 'array',
+ },
+ }
+
+ with pytest.raises(SwaggerValidationError) as excinfo:
+ validate_definitions(definitions, lambda x: x)
+
+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'
diff --git a/tests/validator20/validate_spec_test.py b/tests/validator20/validate_spec_test.py
index 5bc9e53..981255c 100644
--- a/tests/validator20/validate_spec_test.py
+++ b/tests/validator20/validate_spec_test.py
@@ -341,3 +341,37 @@ def test_failure_because_references_in_operation_responses():
validate_spec(invalid_spec)
assert 'GET /endpoint does not have a valid responses section. ' \
'That section cannot be just a reference to another object.' in str(excinfo.value)
+
+
+def test_type_array_with_items_succeed_validation(minimal_swagger_dict):
+ minimal_swagger_dict['definitions'] = {
+ 'definition_1': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ },
+ }
+
+ # Success if no exception are raised
+ validate_spec(minimal_swagger_dict)
+
+
[email protected](
+ 'swagger_dict_override',
+ (
+ {
+ 'definitions': {
+ 'definition_1': {
+ 'type': 'array',
+ },
+ },
+ },
+ )
+)
+def test_type_array_without_items_succeed_fails(minimal_swagger_dict, swagger_dict_override):
+ minimal_swagger_dict.update(swagger_dict_override)
+ with pytest.raises(SwaggerValidationError) as excinfo:
+ validate_spec(minimal_swagger_dict)
+
+ assert str(excinfo.value) == 'Definition of type array must define `items` property (definition definition_1).'
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_hyperlinks",
"has_media"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 2
},
"num_modified_files": 1
} | 2.1 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.6",
"reqs_path": [
"requirements-dev.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | attrs==22.2.0
certifi==2021.5.30
httpretty==1.1.4
importlib-metadata==4.8.3
iniconfig==1.1.1
jsonschema==3.2.0
mock==5.2.0
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyparsing==3.1.4
pyrsistent==0.18.0
pytest==7.0.1
PyYAML==6.0.1
six==1.17.0
-e git+https://github.com/Yelp/swagger_spec_validator.git@40e1cc926775777ff2d56e271fd61697c6235579#egg=swagger_spec_validator
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: swagger_spec_validator
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- attrs==22.2.0
- httpretty==1.1.4
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jsonschema==3.2.0
- mock==5.2.0
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyparsing==3.1.4
- pyrsistent==0.18.0
- pytest==7.0.1
- pyyaml==6.0.1
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/swagger_spec_validator
| [
"tests/validator20/validate_definitions_test.py::test_type_array_without_items_succeed_fails"
] | [
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec0]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec1]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec2]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec3]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec4]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec5]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec6]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec7]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec8]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec9]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_succeed[property_spec10]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec0-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec1-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec2-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec3-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec4-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec5-type-wrong_type]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec6-type--1]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec7-minLength-short_string]",
"tests/validator20/validate_definitions_test.py::test_api_check_default_fails[property_spec8-type-not_a_number_or_boolean]",
"tests/validator20/validate_spec_test.py::test_success",
"tests/validator20/validate_spec_test.py::test_definitons_not_present_success",
"tests/validator20/validate_spec_test.py::test_empty_definitions_success",
"tests/validator20/validate_spec_test.py::test_api_parameters_as_refs",
"tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_dict",
"tests/validator20/validate_spec_test.py::test_fails_on_invalid_external_ref_in_list",
"tests/validator20/validate_spec_test.py::test_recursive_ref",
"tests/validator20/validate_spec_test.py::test_recursive_ref_failure",
"tests/validator20/validate_spec_test.py::test_complicated_refs",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_required",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_string",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_fail_because_not_in_properties",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_required",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_string",
"tests/validator20/validate_spec_test.py::test_specs_with_discriminator_in_allOf_fail_because_not_in_properties",
"tests/validator20/validate_spec_test.py::test_read_yaml_specs",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec0]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec1]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec2]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec3]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec4]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec5]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec6]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec7]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec8]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec9]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec10]",
"tests/validator20/validate_spec_test.py::test_valid_specs_with_check_of_default_types[property_spec11]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec0-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec1-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec2-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec3-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec4-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec5-type-wrong_type]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec6-type--1]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec7-minLength-short_string]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec8-type-not_a_number_or_boolean]",
"tests/validator20/validate_spec_test.py::test_failure_due_to_wrong_default_type[property_spec9-enum-not_valid]",
"tests/validator20/validate_spec_test.py::test_ref_without_str_argument",
"tests/validator20/validate_spec_test.py::test_failure_because_references_in_operation_responses",
"tests/validator20/validate_spec_test.py::test_type_array_with_items_succeed_validation",
"tests/validator20/validate_spec_test.py::test_type_array_without_items_succeed_fails[swagger_dict_override0]"
] | [
"tests/validator20/validate_definitions_test.py::test_type_array_with_items_succeed_validation"
] | [] | Apache License 2.0 | 2,620 | 294 | [
"swagger_spec_validator/validator20.py"
] |
|
pika__pika-1066 | 17aed0fa20f55ed3bc080320414badbb27046e8d | 2018-06-06 22:49:26 | 4c904dea651caaf2a54b0fca0b9e908dec18a4f8 | diff --git a/examples/consume.py b/examples/consume.py
index da95d9e..7344149 100644
--- a/examples/consume.py
+++ b/examples/consume.py
@@ -1,17 +1,15 @@
+import functools
+import logging
import pika
-def on_message(channel, method_frame, header_frame, body):
- channel.queue_declare(queue=body, auto_delete=True)
+LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
+ '-35s %(lineno) -5d: %(message)s')
+LOGGER = logging.getLogger(__name__)
- if body.startswith("queue:"):
- queue = body.replace("queue:", "")
- key = body + "_key"
- print("Declaring queue %s bound with key %s" %(queue, key))
- channel.queue_declare(queue=queue, auto_delete=True)
- channel.queue_bind(queue=queue, exchange="test_exchange", routing_key=key)
- else:
- print("Message body", body)
+logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
+def on_message(channel, method_frame, header_frame, body, userdata=None):
+ LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
credentials = pika.PlainCredentials('guest', 'guest')
@@ -24,7 +22,8 @@ channel.queue_declare(queue="standard", auto_delete=True)
channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
channel.basic_qos(prefetch_count=1)
-channel.basic_consume(on_message, 'standard')
+on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
+channel.basic_consume(on_message_callback, 'standard')
try:
channel.start_consuming()
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index c02d5df..8d3d20a 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -23,13 +23,22 @@ class HeartbeatChecker(object):
:param pika.connection.Connection: Connection object
:param int interval: Heartbeat check interval. Note: heartbeats will
be sent at interval / 2 frequency.
+ :param int idle_count: The number of heartbeat intervals without data
+ received that will close the current connection.
"""
self._connection = connection
+
# Note: see the following document:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
self._interval = float(interval / 2)
- self._max_idle_count = idle_count
+
+ # Note: even though we're sending heartbeats in half the specified
+ # interval, the broker will be sending them to us at the specified
+ # interval. This means we'll be checking for an idle connection
+ # twice as many times as the broker will send heartbeats to us,
+ # so we need to double the max idle count here
+ self._max_idle_count = idle_count * 2
# Initialize counters
self._bytes_received = 0
@@ -82,9 +91,12 @@ class HeartbeatChecker(object):
been idle too long.
"""
- LOGGER.debug('Received %i heartbeat frames, sent %i',
+ LOGGER.debug('Received %i heartbeat frames, sent %i, '
+ 'idle intervals %i, max idle count %i',
self._heartbeat_frames_received,
- self._heartbeat_frames_sent)
+ self._heartbeat_frames_sent,
+ self._idle_byte_intervals,
+ self._max_idle_count)
if self.connection_is_idle:
return self._close_connection()
| HeartbeatChecker is confused about heartbeat timeouts
cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.
`HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. `Connection` class instantiates `HeartbeatChecker` with `interval=hearbeat_timeout` and default `idle_count`.
So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. So, `HearbeatChecker` will emit heartbeats to the broker only once every 600 seconds. And it will detect heartbeat timeout after 1200 seconds.
So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in absence of any other AMQP frames from the client), the broker can erroneously conclude that connection with the client is lost and prematurely close the connection.
This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity. And it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.
I see two problems here:
1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as`HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2) or something like that (how often does RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. It's constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
| pika/pika | diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py
index fa97338..f0431c2 100644
--- a/tests/unit/heartbeat_tests.py
+++ b/tests/unit/heartbeat_tests.py
@@ -29,7 +29,7 @@ class HeartbeatTests(unittest.TestCase):
self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
def test_default_initialization_max_idle_count(self):
- self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT)
+ self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)
def test_constructor_assignment_connection(self):
self.assertIs(self.obj._connection, self.mock_conn)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 2
},
"num_modified_files": 2
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"cryptography"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
cffi==1.17.1
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
cryptography==44.0.2
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
-e git+https://github.com/pika/pika.git@17aed0fa20f55ed3bc080320414badbb27046e8d#egg=pika
pluggy==1.5.0
pycparser==2.22
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
tornado==6.4.2
Twisted==15.3.0
urllib3==2.3.0
zope.interface==7.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- cffi==1.17.1
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- cryptography==44.0.2
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pycparser==2.22
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- tornado==6.4.2
- twisted==15.3.0
- urllib3==2.3.0
- zope-interface==7.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_max_idle_count"
] | [] | [
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_active_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_heartbeat_interval",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_setup_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_default_initialization_interval",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_increment_no_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_missed_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_not_closed",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_send_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_start_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_and_check_update_counters",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_setup_timer_called",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_not_active",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,632 | 864 | [
"examples/consume.py",
"pika/heartbeat.py"
] |
|
oasis-open__cti-stix-validator-55 | f3dcf83c352c99b5190e9697db7149ce3baf5961 | 2018-06-08 12:26:02 | 120c27adf9db76511d01e696d234c35d45f2face | diff --git a/stix2validator/scripts/stix2_validator.py b/stix2validator/scripts/stix2_validator.py
index 15bd7b0..8dda167 100644
--- a/stix2validator/scripts/stix2_validator.py
+++ b/stix2validator/scripts/stix2_validator.py
@@ -292,9 +292,6 @@ def main():
options = ValidationOptions(args)
try:
- # Set the output level (e.g., quiet vs. verbose)
- output.set_level(options.verbose)
-
if not options.no_cache:
init_requests_cache(options.refresh_cache)
diff --git a/stix2validator/util.py b/stix2validator/util.py
index 4da0be5..327931f 100644
--- a/stix2validator/util.py
+++ b/stix2validator/util.py
@@ -1,5 +1,7 @@
from collections import Iterable
+from .output import error, set_level, set_silent
+
class ValidationOptions(object):
"""Collection of validation options which can be set via command line or
@@ -72,6 +74,12 @@ class ValidationOptions(object):
self.refresh_cache = refresh_cache
self.clear_cache = clear_cache
+ # Set the output level (e.g., quiet vs. verbose)
+ if self.silent and self.verbose:
+ error('Error: Output can either be silent or verbose, but not both.')
+ set_level(self.verbose)
+ set_silent(self.silent)
+
# Convert string of comma-separated checks to a list,
# and convert check code numbers to names
if self.disabled:
| handle options --verbose and --silent correctly
Related to #50
The correct combination of these two should be as follows:
|--verbose | --silent | desired behavior |
| --- | --- | --- |
|absent (default is False) | absent (default is False) | all messages except those printed by info |
|absent (default is False) | present (True) | no messages printed
| present (True) | absent (default is False) | all messages, including info are printed
| present (True) | present (True) | error |
Current behavior is:
|--verbose | --silent | current behavior |
| --- | --- | --- |
|absent (default is False) | absent (default is False) | all messages except those printed by info |
|absent (default is False) | present (ignored, so the default - False) | all messages except those printed by info |
| present (True) | absent (default is False) | all messages, including info are printed
| present (True) | present (ignored, so the default - False) | all messages, including info are printed | | oasis-open/cti-stix-validator | diff --git a/stix2validator/test/bundle_tests.py b/stix2validator/test/bundle_tests.py
index 8f417bd..52235ba 100644
--- a/stix2validator/test/bundle_tests.py
+++ b/stix2validator/test/bundle_tests.py
@@ -1,6 +1,8 @@
import copy
import json
+import pytest
+
from . import ValidatorTest
VALID_BUNDLE = u"""
@@ -51,3 +53,8 @@ class BundleTestCases(ValidatorTest):
bundle['objects'][1]['modified'] = "2017-06-22T14:09:00.123Z"
self.assertTrueWithOptions(bundle)
+
+ def test_silent_and_verbose(self):
+ bundle = json.loads(VALID_BUNDLE)
+ with pytest.raises(SystemExit):
+ self.assertFalseWithOptions(bundle, silent=True, verbose=True)
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 1
},
"num_modified_files": 2
} | 0.5 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov",
"coverage"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.13
antlr4-python3-runtime==4.9.3
appdirs==1.4.4
attrs==21.4.0
Babel==2.11.0
bump2version==1.0.1
bumpversion==0.6.0
certifi==2021.5.30
cfgv==3.3.1
charset-normalizer==2.0.12
colorama==0.4.5
coverage==6.2
distlib==0.3.9
docutils==0.18.1
filelock==3.4.1
identify==2.4.4
idna==3.10
imagesize==1.4.1
importlib-metadata==4.8.3
importlib-resources==5.2.3
iniconfig==1.1.1
itsdangerous==2.0.1
Jinja2==3.0.3
jsonschema==2.5.1
MarkupSafe==2.0.1
nodeenv==1.6.0
packaging==21.3
platformdirs==2.4.0
pluggy==1.0.0
pre-commit==2.17.0
py==1.11.0
Pygments==2.14.0
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
python-dateutil==2.9.0.post0
pytz==2025.2
PyYAML==6.0.1
requests==2.27.1
requests-cache==0.7.5
simplejson==3.20.1
six==1.17.0
snowballstemmer==2.2.0
Sphinx==5.3.0
sphinx-prompt==1.5.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.5
stix2-patterns==2.0.0
-e git+https://github.com/oasis-open/cti-stix-validator.git@f3dcf83c352c99b5190e9697db7149ce3baf5961#egg=stix2_validator
toml==0.10.2
tomli==1.2.3
tox==3.28.0
typing_extensions==4.1.1
url-normalize==1.4.3
urllib3==1.26.20
virtualenv==20.16.2
zipp==3.6.0
| name: cti-stix-validator
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.13
- antlr4-python3-runtime==4.9.3
- appdirs==1.4.4
- attrs==21.4.0
- babel==2.11.0
- bump2version==1.0.1
- bumpversion==0.6.0
- cfgv==3.3.1
- charset-normalizer==2.0.12
- colorama==0.4.5
- coverage==6.2
- distlib==0.3.9
- docutils==0.18.1
- filelock==3.4.1
- identify==2.4.4
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==4.8.3
- importlib-resources==5.2.3
- iniconfig==1.1.1
- itsdangerous==2.0.1
- jinja2==3.0.3
- jsonschema==2.5.1
- markupsafe==2.0.1
- nodeenv==1.6.0
- packaging==21.3
- platformdirs==2.4.0
- pluggy==1.0.0
- pre-commit==2.17.0
- py==1.11.0
- pygments==2.14.0
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- python-dateutil==2.9.0.post0
- pytz==2025.2
- pyyaml==6.0.1
- requests==2.27.1
- requests-cache==0.7.5
- simplejson==3.20.1
- six==1.17.0
- snowballstemmer==2.2.0
- sphinx==5.3.0
- sphinx-prompt==1.5.0
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- stix2-patterns==2.0.0
- toml==0.10.2
- tomli==1.2.3
- tox==3.28.0
- typing-extensions==4.1.1
- url-normalize==1.4.3
- urllib3==1.26.20
- virtualenv==20.16.2
- zipp==3.6.0
prefix: /opt/conda/envs/cti-stix-validator
| [
"stix2validator/test/bundle_tests.py::BundleTestCases::test_silent_and_verbose"
] | [
"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_duplicate_ids",
"stix2validator/test/bundle_tests.py::BundleTestCases::test_wellformed_bundle"
] | [
"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_created",
"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_object_categories",
"stix2validator/test/bundle_tests.py::BundleTestCases::test_bundle_version"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,640 | 375 | [
"stix2validator/scripts/stix2_validator.py",
"stix2validator/util.py"
] |
|
attwad__python-osc-67 | 73777b367ac4327e9fd0b799366959e50266ebc2 | 2018-06-08 20:16:51 | 73777b367ac4327e9fd0b799366959e50266ebc2 | diff --git a/pythonosc/osc_message.py b/pythonosc/osc_message.py
index 911dbcf..b89a0d6 100644
--- a/pythonosc/osc_message.py
+++ b/pythonosc/osc_message.py
@@ -41,6 +41,8 @@ class OscMessage(object):
val, index = osc_types.get_int(self._dgram, index)
elif param == "f": # Float.
val, index = osc_types.get_float(self._dgram, index)
+ elif param == "d": # Double.
+ val, index = osc_types.get_double(self._dgram, index)
elif param == "s": # String.
val, index = osc_types.get_string(self._dgram, index)
elif param == "b": # Blob.
diff --git a/pythonosc/osc_message_builder.py b/pythonosc/osc_message_builder.py
index 0f9bfba..28128fb 100644
--- a/pythonosc/osc_message_builder.py
+++ b/pythonosc/osc_message_builder.py
@@ -12,6 +12,7 @@ class OscMessageBuilder(object):
"""Builds arbitrary OscMessage instances."""
ARG_TYPE_FLOAT = "f"
+ ARG_TYPE_DOUBLE = "d"
ARG_TYPE_INT = "i"
ARG_TYPE_STRING = "s"
ARG_TYPE_BLOB = "b"
@@ -24,8 +25,8 @@ class OscMessageBuilder(object):
ARG_TYPE_ARRAY_STOP = "]"
_SUPPORTED_ARG_TYPES = (
- ARG_TYPE_FLOAT, ARG_TYPE_INT, ARG_TYPE_BLOB, ARG_TYPE_STRING, ARG_TYPE_RGBA,
- ARG_TYPE_MIDI, ARG_TYPE_TRUE, ARG_TYPE_FALSE)
+ ARG_TYPE_FLOAT, ARG_TYPE_DOUBLE, ARG_TYPE_INT, ARG_TYPE_BLOB, ARG_TYPE_STRING,
+ ARG_TYPE_RGBA, ARG_TYPE_MIDI, ARG_TYPE_TRUE, ARG_TYPE_FALSE)
def __init__(self, address=None):
"""Initialize a new builder for a message.
@@ -143,6 +144,8 @@ class OscMessageBuilder(object):
dgram += osc_types.write_int(value)
elif arg_type == self.ARG_TYPE_FLOAT:
dgram += osc_types.write_float(value)
+ elif arg_type == self.ARG_TYPE_DOUBLE:
+ dgram += osc_types.write_double(value)
elif arg_type == self.ARG_TYPE_BLOB:
dgram += osc_types.write_blob(value)
elif arg_type == self.ARG_TYPE_RGBA:
diff --git a/pythonosc/parsing/osc_types.py b/pythonosc/parsing/osc_types.py
index a91003b..5558399 100644
--- a/pythonosc/parsing/osc_types.py
+++ b/pythonosc/parsing/osc_types.py
@@ -21,6 +21,7 @@ IMMEDIATELY = 0
# Datagram length in bytes for types that have a fixed size.
_INT_DGRAM_LEN = 4
_FLOAT_DGRAM_LEN = 4
+_DOUBLE_DGRAM_LEN = 8
_DATE_DGRAM_LEN = _INT_DGRAM_LEN * 2
# Strings and blob dgram length is always a multiple of 4 bytes.
_STRING_DGRAM_PAD = 4
@@ -199,6 +200,42 @@ def get_float(dgram, start_index):
raise ParseError('Could not parse datagram %s' % e)
+def write_double(val):
+ """Returns the datagram for the given double parameter value
+
+ Raises:
+ - BuildError if the double could not be converted.
+ """
+ try:
+ return struct.pack('>d', val)
+ except struct.error as e:
+ raise BuildError('Wrong argument value passed: {}'.format(e))
+
+
+def get_double(dgram, start_index):
+ """Get a 64-bit big-endian IEEE 754 floating point number from the datagram.
+
+ Args:
+ dgram: A datagram packet.
+ start_index: An index where the double starts in the datagram.
+
+ Returns:
+ A tuple containing the double and the new end index.
+
+ Raises:
+ ParseError if the datagram could not be parsed.
+ """
+ try:
+ if len(dgram[start_index:]) < _DOUBLE_DGRAM_LEN:
+ raise ParseError('Datagram is too short')
+ return (
+ struct.unpack('>d',
+ dgram[start_index:start_index + _DOUBLE_DGRAM_LEN])[0],
+ start_index + _DOUBLE_DGRAM_LEN)
+ except (struct.error, TypeError) as e:
+ raise ParseError('Could not parse datagram {}'.format(e))
+
+
def get_blob(dgram, start_index):
""" Get a blob from the datagram.
| Add support for 64 bits double type
`unhandled type: d` warnings are all that gets returned, no handlers even end up running. | attwad/python-osc | diff --git a/pythonosc/test/parsing/test_osc_types.py b/pythonosc/test/parsing/test_osc_types.py
index 0863fd5..8734ad1 100644
--- a/pythonosc/test/parsing/test_osc_types.py
+++ b/pythonosc/test/parsing/test_osc_types.py
@@ -232,6 +232,39 @@ class TestFloat(unittest.TestCase):
self.assertEqual((0, 4), osc_types.get_float(dgram, 0))
+class TestDouble(unittest.TestCase):
+
+ def test_get_double(self):
+ cases = {
+ b'\x00\x00\x00\x00\x00\x00\x00\x00': (0.0, 8),
+ b'?\xf0\x00\x00\x00\x00\x00\x00': (1.0, 8),
+ b'@\x00\x00\x00\x00\x00\x00\x00': (2.0, 8),
+ b'\xbf\xf0\x00\x00\x00\x00\x00\x00': (-1.0, 8),
+ b'\xc0\x00\x00\x00\x00\x00\x00\x00': (-2.0, 8),
+
+ b"\x00\x00\x00\x00\x00\x00\x00\x00GARBAGE": (0.0, 8),
+ }
+
+ for dgram, expected in cases.items():
+ self.assertAlmostEqual(expected, osc_types.get_double(dgram, 0))
+
+ def test_get_double_raises_on_wrong_dgram(self):
+ cases = [True]
+
+ for case in cases:
+ self.assertRaises(osc_types.ParseError, osc_types.get_double, case, 0)
+
+ def test_get_double_raises_on_type_error(self):
+ cases = [None]
+
+ for case in cases:
+ self.assertRaises(osc_types.ParseError, osc_types.get_double, case, 0)
+
+ def test_datagram_too_short_pads(self):
+ dgram = b'\x00' * 2
+ self.assertRaises(osc_types.ParseError, osc_types.get_double, dgram, 0)
+
+
class TestBlob(unittest.TestCase):
def test_get_blob(self):
diff --git a/pythonosc/test/test_osc_message_builder.py b/pythonosc/test/test_osc_message_builder.py
index c9720b4..d5bbe25 100644
--- a/pythonosc/test/test_osc_message_builder.py
+++ b/pythonosc/test/test_osc_message_builder.py
@@ -43,14 +43,15 @@ class TestOscMessageBuilder(unittest.TestCase):
builder.add_arg([1, ["abc"]], [builder.ARG_TYPE_INT, [builder.ARG_TYPE_STRING]])
builder.add_arg(4278255360, builder.ARG_TYPE_RGBA)
builder.add_arg((1, 145, 36, 125), builder.ARG_TYPE_MIDI)
- self.assertEqual(len("fisTFb[i[s]]")*2+2, len(builder.args))
+ builder.add_arg(1e-9, builder.ARG_TYPE_DOUBLE)
+ self.assertEqual(len("fisTFb[i[s]]")*2+3, len(builder.args))
self.assertEqual("/SYNC", builder.address)
builder.address = '/SEEK'
msg = builder.build()
self.assertEqual("/SEEK", msg.address)
self.assertSequenceEqual(
[4.0, 2, "value", True, False, b"\x01\x02\x03", [1, ["abc"]]] * 2 +
- [4278255360, (1, 145, 36, 125)],
+ [4278255360, (1, 145, 36, 125), 1e-9],
msg.params)
def test_long_list(self):
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 0
},
"num_modified_files": 3
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
-e git+https://github.com/attwad/python-osc.git@73777b367ac4327e9fd0b799366959e50266ebc2#egg=python_osc
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: python-osc
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/python-osc
| [
"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_datagram_too_short_pads",
"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double",
"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestDouble::test_get_double_raises_on_wrong_dgram",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_all_param_types"
] | [] | [
"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string",
"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_dgram",
"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestString::test_get_string_raises_when_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer",
"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index",
"pythonosc/test/parsing/test_osc_types.py::TestInteger::test_get_integer_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba",
"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index",
"pythonosc/test/parsing/test_osc_types.py::TestRGBA::test_get_rgba_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi",
"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index",
"pythonosc/test/parsing/test_osc_types.py::TestMidi::test_get_midi_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag",
"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index",
"pythonosc/test/parsing/test_osc_types.py::TestDate::test_get_ttag_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestDate::test_ttag_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_datagram_too_short_pads",
"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float",
"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_type_error",
"pythonosc/test/parsing/test_osc_types.py::TestFloat::test_get_float_raises_on_wrong_dgram",
"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob",
"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_dgram",
"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_on_wrong_start_index",
"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blob_raises_too_short_buffer",
"pythonosc/test/parsing/test_osc_types.py::TestBlob::test_get_blog_raises_on_wrong_start_index_negative",
"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_datagram_too_short",
"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_immediately_dgram",
"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_origin_of_time",
"pythonosc/test/parsing/test_osc_types.py::TestNTPTimestamp::test_write_date",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_blob_raises",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_float",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_float_raises",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_int_raises",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string",
"pythonosc/test/parsing/test_osc_types.py::TestBuildMethods::test_string_raises",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_add_arg_invalid_infered_type",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_bool_encoding",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_noarg_message",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_build_wrong_type_raises",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_just_address",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_long_list",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_no_address_raises",
"pythonosc/test/test_osc_message_builder.py::TestOscMessageBuilder::test_wrong_param_raise"
] | [] | The Unlicense | 2,642 | 1,065 | [
"pythonosc/osc_message.py",
"pythonosc/osc_message_builder.py",
"pythonosc/parsing/osc_types.py"
] |
|
pika__pika-1071 | 107fb0fd7028250fda0d8f901b65c93a91d7cb82 | 2018-06-09 18:20:40 | 4c904dea651caaf2a54b0fca0b9e908dec18a4f8 | vitaly-krugl: @lukebakken, let's work out one of these parallel pull requests first, then create the second one after that.
michaelklishin: This [rabbitmq-users thread](https://groups.google.com/d/msg/rabbitmq-users/Fmfeqe5ocTY/0fxMMVsSAgAJ) is worth mentioning. | diff --git a/examples/consume.py b/examples/consume.py
index 7344149..26e4620 100644
--- a/examples/consume.py
+++ b/examples/consume.py
@@ -1,3 +1,4 @@
+"""Basic message consumer example"""
import functools
import logging
import pika
@@ -8,26 +9,36 @@ LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
-def on_message(channel, method_frame, header_frame, body, userdata=None):
- LOGGER.info('Userdata: {} Message body: {}'.format(userdata, body))
- channel.basic_ack(delivery_tag=method_frame.delivery_tag)
-
-credentials = pika.PlainCredentials('guest', 'guest')
-parameters = pika.ConnectionParameters('localhost', credentials=credentials)
-connection = pika.BlockingConnection(parameters)
-
-channel = connection.channel()
-channel.exchange_declare(exchange="test_exchange", exchange_type="direct", passive=False, durable=True, auto_delete=False)
-channel.queue_declare(queue="standard", auto_delete=True)
-channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
-channel.basic_qos(prefetch_count=1)
-
-on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
-channel.basic_consume(on_message_callback, 'standard')
-
-try:
- channel.start_consuming()
-except KeyboardInterrupt:
- channel.stop_consuming()
-
-connection.close()
+def on_message(chan, method_frame, _header_frame, body, userdata=None):
+ """Called when a message is received. Log message and ack it."""
+ LOGGER.info('Userdata: %s Message body: %s', userdata, body)
+ chan.basic_ack(delivery_tag=method_frame.delivery_tag)
+
+def main():
+ """Main method."""
+ credentials = pika.PlainCredentials('guest', 'guest')
+ parameters = pika.ConnectionParameters('localhost', credentials=credentials)
+ connection = pika.BlockingConnection(parameters)
+
+ channel = connection.channel()
+ channel.exchange_declare(exchange="test_exchange",
+ exchange_type="direct",
+ passive=False,
+ durable=True,
+ auto_delete=False)
+ channel.queue_declare(queue="standard", auto_delete=True)
+ channel.queue_bind(queue="standard", exchange="test_exchange", routing_key="standard_key")
+ channel.basic_qos(prefetch_count=1)
+
+ on_message_callback = functools.partial(on_message, userdata='on_message_userdata')
+ channel.basic_consume(on_message_callback, 'standard')
+
+ try:
+ channel.start_consuming()
+ except KeyboardInterrupt:
+ channel.stop_consuming()
+
+ connection.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/pika/connection.py b/pika/connection.py
index 0c4e2a7..bed9bdb 100644
--- a/pika/connection.py
+++ b/pika/connection.py
@@ -1301,7 +1301,7 @@ class Connection(object):
self._backpressure_multiplier = value
#
- # Connections state properties
+ # Connection state properties
#
@property
diff --git a/pika/heartbeat.py b/pika/heartbeat.py
index 8d3d20a..7d4d7dd 100644
--- a/pika/heartbeat.py
+++ b/pika/heartbeat.py
@@ -7,38 +7,67 @@ LOGGER = logging.getLogger(__name__)
class HeartbeatChecker(object):
- """Checks to make sure that our heartbeat is received at the expected
- intervals.
+ """Sends heartbeats to the broker. The provided timeout is used to
+ determine if the connection is stale - no received heartbeats or
+ other activity will close the connection. See the parameter list for more
+ details.
"""
- DEFAULT_INTERVAL = 60
- MAX_IDLE_COUNT = 2
_CONNECTION_FORCED = 320
- _STALE_CONNECTION = "Too Many Missed Heartbeats, No reply in %i seconds"
+ _STALE_CONNECTION = "No activity or too many missed meartbeats in the last %i seconds"
- def __init__(self, connection, interval=DEFAULT_INTERVAL, idle_count=MAX_IDLE_COUNT):
- """Create a heartbeat on connection sending a heartbeat frame every
- interval seconds.
+ def __init__(self, connection, timeout):
+ """Create an object that will check for activity on the provided
+ connection as well as receive heartbeat frames from the broker. The
+ timeout parameter defines a window within which this activity must
+ happen. If not, the connection is considered dead and closed.
+
+ The value passed for timeout is also used to calculate an interval
+ at which a heartbeat frame is sent to the broker. The interval is
+ equal to the timeout value divided by two.
:param pika.connection.Connection: Connection object
- :param int interval: Heartbeat check interval. Note: heartbeats will
- be sent at interval / 2 frequency.
- :param int idle_count: The number of heartbeat intervals without data
- received that will close the current connection.
+ :param int timeout: Connection idle timeout. If no activity occurs on the
+ connection nor heartbeat frames received during the
+ timeout window the connection will be closed. The
+ interval used to send heartbeats is calculated from
+ this value by dividing it by two.
"""
+ if timeout < 1:
+ raise ValueError('timeout must >= 0, but got %r' % (timeout,))
+
self._connection = connection
- # Note: see the following document:
+ # Note: see the following documents:
# https://www.rabbitmq.com/heartbeats.html#heartbeats-timeout
- self._interval = float(interval / 2)
-
- # Note: even though we're sending heartbeats in half the specified
- # interval, the broker will be sending them to us at the specified
- # interval. This means we'll be checking for an idle connection
- # twice as many times as the broker will send heartbeats to us,
- # so we need to double the max idle count here
- self._max_idle_count = idle_count * 2
+ # https://github.com/pika/pika/pull/1072
+ # https://groups.google.com/d/topic/rabbitmq-users/Fmfeqe5ocTY/discussion
+ # There is a certain amount of confusion around how client developers
+ # interpret the spec. The spec talks about 2 missed heartbeats as a
+ # *timeout*, plus that any activity on the connection counts for a
+ # heartbeat. This is to avoid edge cases and not to depend on network
+ # latency.
+ self._timeout = timeout
+
+ self._send_interval = float(timeout) / 2
+
+ # Note: Pika will calculate the heartbeat / connectivity check interval
+ # by adding 5 seconds to the negotiated timeout to leave a bit of room
+ # for broker heartbeats that may be right at the edge of the timeout
+ # window. This is different behavior from the RabbitMQ Java client and
+ # the spec that suggests a check interval equivalent to two times the
+ # heartbeat timeout value. But, one advantage of adding a small amount
+ # is that bad connections will be detected faster.
+ # https://github.com/pika/pika/pull/1072#issuecomment-397850795
+ # https://github.com/rabbitmq/rabbitmq-java-client/blob/b55bd20a1a236fc2d1ea9369b579770fa0237615/src/main/java/com/rabbitmq/client/impl/AMQConnection.java#L773-L780
+ # https://github.com/ruby-amqp/bunny/blob/3259f3af2e659a49c38c2470aa565c8fb825213c/lib/bunny/session.rb#L1187-L1192
+ self._check_interval = timeout + 5
+
+ LOGGER.debug('timeout: %f send_interval: %f check_interval: %f',
+ self._timeout,
+ self._send_interval,
+ self._check_interval)
# Initialize counters
self._bytes_received = 0
@@ -47,21 +76,10 @@ class HeartbeatChecker(object):
self._heartbeat_frames_sent = 0
self._idle_byte_intervals = 0
- # The handle for the last timer
- self._timer = None
-
- # Setup the timer to fire in _interval seconds
- self._setup_timer()
-
- @property
- def active(self):
- """Return True if the connection's heartbeat attribute is set to this
- instance.
-
- :rtype True
-
- """
- return self._connection.heartbeat is self
+ self._send_timer = None
+ self._check_timer = None
+ self._start_send_timer()
+ self._start_check_timer()
@property
def bytes_received_on_connection(self):
@@ -78,74 +96,78 @@ class HeartbeatChecker(object):
to trip the max idle threshold.
"""
- return self._idle_byte_intervals >= self._max_idle_count
+ return self._idle_byte_intervals > 0
def received(self):
"""Called when a heartbeat is received"""
LOGGER.debug('Received heartbeat frame')
self._heartbeat_frames_received += 1
- def send_and_check(self):
- """Invoked by a timer to send a heartbeat when we need to, check to see
+ def _send_heartbeat(self):
+ """Invoked by a timer to send a heartbeat when we need to.
+
+ """
+ LOGGER.debug('Sending heartbeat frame')
+ self._send_heartbeat_frame()
+ self._start_send_timer()
+
+ def _check_heartbeat(self):
+ """Invoked by a timer to check for broker heartbeats. Checks to see
if we've missed any heartbeats and disconnect our connection if it's
been idle too long.
"""
+ if self._has_received_data:
+ self._idle_byte_intervals = 0
+ else:
+ # Connection has not received any data, increment the counter
+ self._idle_byte_intervals += 1
+
LOGGER.debug('Received %i heartbeat frames, sent %i, '
- 'idle intervals %i, max idle count %i',
+ 'idle intervals %i',
self._heartbeat_frames_received,
self._heartbeat_frames_sent,
- self._idle_byte_intervals,
- self._max_idle_count)
+ self._idle_byte_intervals)
if self.connection_is_idle:
- return self._close_connection()
-
- # Connection has not received any data, increment the counter
- if not self._has_received_data:
- self._idle_byte_intervals += 1
- else:
- self._idle_byte_intervals = 0
+ self._close_connection()
+ return
- # Update the counters of bytes sent/received and the frames received
- self._update_counters()
-
- # Send a heartbeat frame
- self._send_heartbeat_frame()
-
- # Update the timer to fire again
- self._start_timer()
+ self._start_check_timer()
def stop(self):
"""Stop the heartbeat checker"""
- if self._timer:
- LOGGER.debug('Removing timeout for next heartbeat interval')
- self._connection.remove_timeout(self._timer)
- self._timer = None
+ if self._send_timer:
+ LOGGER.debug('Removing timer for next heartbeat send interval')
+ self._connection.remove_timeout(self._send_timer) # pylint: disable=W0212
+ self._send_timer = None
+ if self._check_timer:
+ LOGGER.debug('Removing timer for next heartbeat check interval')
+ self._connection.remove_timeout(self._check_timer) # pylint: disable=W0212
+ self._check_timer = None
def _close_connection(self):
"""Close the connection with the AMQP Connection-Forced value."""
LOGGER.info('Connection is idle, %i stale byte intervals',
self._idle_byte_intervals)
- duration = self._max_idle_count * self._interval
- text = HeartbeatChecker._STALE_CONNECTION % duration
+ text = HeartbeatChecker._STALE_CONNECTION % self._timeout
# NOTE: this won't achieve the perceived effect of sending
# Connection.Close to broker, because the frame will only get buffered
# in memory before the next statement terminates the connection.
self._connection.close(HeartbeatChecker._CONNECTION_FORCED, text)
- self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED,
+ self._connection._on_terminate(HeartbeatChecker._CONNECTION_FORCED, # pylint: disable=W0212
text)
@property
def _has_received_data(self):
- """Returns True if the connection has received data on the connection.
+ """Returns True if the connection has received data.
:rtype: bool
"""
- return not self._bytes_received == self.bytes_received_on_connection
+ return self._bytes_received != self.bytes_received_on_connection
@staticmethod
def _new_heartbeat_frame():
@@ -161,25 +183,27 @@ class HeartbeatChecker(object):
"""
LOGGER.debug('Sending heartbeat frame')
- self._connection._send_frame(self._new_heartbeat_frame())
+ self._connection._send_frame( # pylint: disable=W0212
+ self._new_heartbeat_frame())
self._heartbeat_frames_sent += 1
- def _setup_timer(self):
- """Use the connection objects delayed_call function which is
- implemented by the Adapter for calling the check_heartbeats function
- every interval seconds.
-
- """
- self._timer = self._connection.add_timeout(self._interval,
- self.send_and_check)
-
- def _start_timer(self):
- """If the connection still has this object set for heartbeats, add a
- new timer.
+ def _start_send_timer(self):
+ """Start a new heartbeat send timer."""
+ self._send_timer = self._connection.add_timeout( # pylint: disable=W0212
+ self._send_interval,
+ self._send_heartbeat)
+
+ def _start_check_timer(self):
+ """Start a new heartbeat check timer."""
+ # Note: update counters now to get current values
+ # at the start of the timeout window. Values will be
+ # checked against the connection's byte count at the
+ # end of the window
+ self._update_counters()
- """
- if self.active:
- self._setup_timer()
+ self._check_timer = self._connection.add_timeout( # pylint: disable=W0212
+ self._check_interval,
+ self._check_heartbeat)
def _update_counters(self):
"""Update the internal counters for bytes sent and received and the
| HeartbeatChecker is confused about heartbeat timeouts
cc @lukebakken, the fix should probably be back-ported to the 0.12 release candidate.
`HeartbeatChecker` constructor presently accepts an interval value and an `idle_count` which defaults to 2. `Connection` class instantiates `HeartbeatChecker` with `interval=hearbeat_timeout` and default `idle_count`.
So, if the connection is configured with a heartbeat timeout of 600 (10 minutes), it will pass 600 as the `interval` arg to `HeartbeatChecker`. So, `HearbeatChecker` will emit heartbeats to the broker only once every 600 seconds. And it will detect heartbeat timeout after 1200 seconds.
So, in the event that receipt of the heartbeat by the broker is slightly delayed (and in absence of any other AMQP frames from the client), the broker can erroneously conclude that connection with the client is lost and prematurely close the connection.
This is clearly not what was intended. `HeartbeatChecker` should be detecting a heartbeat timeout after 600 seconds of inactivity. And it should be sending a heartbeat to the broker more often than just once within the heartbeat timeout window.
I see two problems here:
1. Given `HeartbeatChecker`'s present interface, `Connection` should be instantiating it as`HeartbeatChecker(self, interval=float(self.params.heartbeat) / 2, idle_count=2) or something like that (how often does RabbitMQ broker send heartbeats within one heartbeat timeout interval?)
2. `HeartbeatChecker` is not abstracting the internals of heartbeat processing sufficiently. It's constructor should accept the heartbeat timeout value directly (no interval/idle_count business) and encapsulate the frequency of heartbeats internally without bleeding that detail to the `Connection`.
| pika/pika | diff --git a/tests/unit/heartbeat_tests.py b/tests/unit/heartbeat_tests.py
index f0431c2..71fa552 100644
--- a/tests/unit/heartbeat_tests.py
+++ b/tests/unit/heartbeat_tests.py
@@ -8,11 +8,11 @@ import mock
from pika import connection, frame, heartbeat
-
class HeartbeatTests(unittest.TestCase):
INTERVAL = 60
- HALF_INTERVAL = INTERVAL / 2
+ SEND_INTERVAL = float(INTERVAL) / 2
+ CHECK_INTERVAL = INTERVAL + 5
def setUp(self):
self.mock_conn = mock.Mock(spec=connection.Connection)
@@ -25,23 +25,26 @@ class HeartbeatTests(unittest.TestCase):
del self.obj
del self.mock_conn
- def test_default_initialization_interval(self):
- self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
-
- def test_default_initialization_max_idle_count(self):
- self.assertEqual(self.obj._max_idle_count, self.obj.MAX_IDLE_COUNT * 2)
-
def test_constructor_assignment_connection(self):
self.assertIs(self.obj._connection, self.mock_conn)
- def test_constructor_assignment_heartbeat_interval(self):
- self.assertEqual(self.obj._interval, self.HALF_INTERVAL)
+ def test_constructor_assignment_intervals(self):
+ self.assertEqual(self.obj._send_interval, self.SEND_INTERVAL)
+ self.assertEqual(self.obj._check_interval, self.CHECK_INTERVAL)
def test_constructor_initial_bytes_received(self):
- self.assertEqual(self.obj._bytes_received, 0)
+ # Note: _bytes_received is initialized by calls
+ # to _start_check_timer which calls _update_counters
+ # which reads the initial values from the connection
+ self.assertEqual(self.obj._bytes_received,
+ self.mock_conn.bytes_received)
def test_constructor_initial_bytes_sent(self):
- self.assertEqual(self.obj._bytes_received, 0)
+ # Note: _bytes_received is initialized by calls
+ # to _start_check_timer which calls _update_counters
+ # which reads the initial values from the connection
+ self.assertEqual(self.obj._bytes_sent,
+ self.mock_conn.bytes_sent)
def test_constructor_initial_heartbeat_frames_received(self):
self.assertEqual(self.obj._heartbeat_frames_received, 0)
@@ -52,18 +55,15 @@ class HeartbeatTests(unittest.TestCase):
def test_constructor_initial_idle_byte_intervals(self):
self.assertEqual(self.obj._idle_byte_intervals, 0)
- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
- def test_constructor_called_setup_timer(self, timer):
- heartbeat.HeartbeatChecker(self.mock_conn)
+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+ def test_constructor_called_start_send_timer(self, timer):
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
timer.assert_called_once_with()
- def test_active_true(self):
- self.mock_conn.heartbeat = self.obj
- self.assertTrue(self.obj.active)
-
- def test_active_false(self):
- self.mock_conn.heartbeat = mock.Mock()
- self.assertFalse(self.obj.active)
+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+ def test_constructor_called_start_check_timer(self, timer):
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ timer.assert_called_once_with()
def test_bytes_received_on_connection(self):
self.mock_conn.bytes_received = 128
@@ -81,54 +81,63 @@ class HeartbeatTests(unittest.TestCase):
self.assertTrue(self.obj._heartbeat_frames_received, 1)
@mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
- def test_send_and_check_not_closed(self, close_connection):
- obj = heartbeat.HeartbeatChecker(self.mock_conn)
- obj.send_and_check()
+ def test_send_heartbeat_not_closed(self, close_connection):
+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ obj._send_heartbeat()
close_connection.assert_not_called()
@mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
- def test_send_and_check_missed_bytes(self, close_connection):
- obj = heartbeat.HeartbeatChecker(self.mock_conn)
+ def test_check_heartbeat_not_closed(self, close_connection):
+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ self.mock_conn.bytes_received = 128
+ obj._check_heartbeat()
+ close_connection.assert_not_called()
+
+ @mock.patch('pika.heartbeat.HeartbeatChecker._close_connection')
+ def test_check_heartbeat_missed_bytes(self, close_connection):
+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
obj._idle_byte_intervals = self.INTERVAL
- obj.send_and_check()
+ obj._check_heartbeat()
close_connection.assert_called_once_with()
- def test_send_and_check_increment_no_bytes(self):
+ def test_check_heartbeat_increment_no_bytes(self):
self.mock_conn.bytes_received = 100
self.obj._bytes_received = 100
- self.obj.send_and_check()
+ self.obj._check_heartbeat()
self.assertEqual(self.obj._idle_byte_intervals, 1)
- def test_send_and_check_increment_bytes(self):
+ def test_check_heartbeat_increment_bytes(self):
self.mock_conn.bytes_received = 100
self.obj._bytes_received = 128
- self.obj.send_and_check()
+ self.obj._check_heartbeat()
self.assertEqual(self.obj._idle_byte_intervals, 0)
@mock.patch('pika.heartbeat.HeartbeatChecker._update_counters')
- def test_send_and_check_update_counters(self, update_counters):
- obj = heartbeat.HeartbeatChecker(self.mock_conn)
- obj.send_and_check()
+ def test_check_heartbeat_update_counters(self, update_counters):
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
update_counters.assert_called_once_with()
@mock.patch('pika.heartbeat.HeartbeatChecker._send_heartbeat_frame')
- def test_send_and_check_send_heartbeat_frame(self, send_heartbeat_frame):
- obj = heartbeat.HeartbeatChecker(self.mock_conn)
- obj.send_and_check()
+ def test_send_heartbeat_sends_heartbeat_frame(self, send_heartbeat_frame):
+ obj = heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ obj._send_heartbeat()
send_heartbeat_frame.assert_called_once_with()
- @mock.patch('pika.heartbeat.HeartbeatChecker._start_timer')
- def test_send_and_check_start_timer(self, start_timer):
- obj = heartbeat.HeartbeatChecker(self.mock_conn)
- obj.send_and_check()
- start_timer.assert_called_once_with()
+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+ def test_send_heartbeat_start_timer(self, start_send_timer):
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ start_send_timer.assert_called_once_with()
+
+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_check_timer')
+ def test_check_heartbeat_start_timer(self, start_check_timer):
+ heartbeat.HeartbeatChecker(self.mock_conn, self.INTERVAL)
+ start_check_timer.assert_called_once_with()
def test_connection_close(self):
self.obj._idle_byte_intervals = 3
self.obj._idle_heartbeat_intervals = 4
self.obj._close_connection()
- reason = self.obj._STALE_CONNECTION % (
- self.obj._max_idle_count * self.obj._interval)
+ reason = self.obj._STALE_CONNECTION % self.obj._timeout
self.mock_conn.close.assert_called_once_with(
self.obj._CONNECTION_FORCED, reason)
self.mock_conn._on_terminate.assert_called_once_with(
@@ -157,20 +166,17 @@ class HeartbeatTests(unittest.TestCase):
self.obj._send_heartbeat_frame()
self.assertEqual(self.obj._heartbeat_frames_sent, 1)
- def test_setup_timer_called(self):
- self.mock_conn.add_timeout.assert_called_once_with(
- self.HALF_INTERVAL, self.obj.send_and_check)
-
- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
- def test_start_timer_not_active(self, setup_timer):
- self.obj._start_timer()
- setup_timer.assert_not_called()
+ def test_start_send_timer_called(self):
+ want = [mock.call(self.SEND_INTERVAL, self.obj._send_heartbeat),
+ mock.call(self.CHECK_INTERVAL, self.obj._check_heartbeat)]
+ got = self.mock_conn.add_timeout.call_args_list
+ self.assertEqual(got, want)
- @mock.patch('pika.heartbeat.HeartbeatChecker._setup_timer')
- def test_start_timer_active(self, setup_timer):
+ @mock.patch('pika.heartbeat.HeartbeatChecker._start_send_timer')
+ def test_start_timer_active(self, setup_send_timer):
self.mock_conn.heartbeat = self.obj
- self.obj._start_timer()
- self.assertTrue(setup_timer.called)
+ self.obj._start_send_timer()
+ self.assertTrue(setup_send_timer.called)
def test_update_counters_bytes_received(self):
self.mock_conn.bytes_received = 256
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 2,
"test_score": 2
},
"num_modified_files": 3
} | 0.12 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"mock",
"coverage"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"test-requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | certifi==2025.1.31
charset-normalizer==3.4.1
codecov==2.1.13
coverage==7.8.0
exceptiongroup==1.2.2
idna==3.10
iniconfig==2.1.0
mock==5.2.0
nose==1.3.7
packaging==24.2
-e git+https://github.com/pika/pika.git@107fb0fd7028250fda0d8f901b65c93a91d7cb82#egg=pika
pluggy==1.5.0
pytest==8.3.5
requests==2.32.3
tomli==2.2.1
tornado==6.4.2
Twisted==15.3.0
urllib3==2.3.0
zope.interface==7.2
| name: pika
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- certifi==2025.1.31
- charset-normalizer==3.4.1
- codecov==2.1.13
- coverage==7.8.0
- exceptiongroup==1.2.2
- idna==3.10
- iniconfig==2.1.0
- mock==5.2.0
- nose==1.3.7
- packaging==24.2
- pluggy==1.5.0
- pytest==8.3.5
- requests==2.32.3
- tomli==2.2.1
- tornado==6.4.2
- twisted==15.3.0
- urllib3==2.3.0
- zope-interface==7.2
prefix: /opt/conda/envs/pika
| [
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_increment_no_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_missed_bytes",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_not_closed",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_start_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_check_heartbeat_update_counters",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_close",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_intervals",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_check_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_called_start_send_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_bytes_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_not_closed",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_sends_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_start_timer",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_send_timer_called",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_start_timer_active"
] | [] | [
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_bytes_received_on_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_connection_is_idle_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_assignment_connection",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_heartbeat_frames_sent",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_constructor_initial_idle_byte_intervals",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_false",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_has_received_data_true",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_new_heartbeat_frame",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_counter_incremented",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_send_heartbeat_send_frame_called",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_received",
"tests/unit/heartbeat_tests.py::HeartbeatTests::test_update_counters_bytes_sent"
] | [] | BSD 3-Clause "New" or "Revised" License | 2,645 | 3,454 | [
"examples/consume.py",
"pika/connection.py",
"pika/heartbeat.py"
] |
EdinburghGenomics__clarity_scripts-62 | 6fd2d0a0f7fc2f12213daa9f265b8c5a35e3e7ef | 2018-06-11 10:59:25 | c2eec150467a3cd2185408cd44a6a773b8b6ee99 | diff --git a/scripts/convert_and_dispatch_genotypes.py b/scripts/convert_and_dispatch_genotypes.py
index 18720e0..aa7959e 100644
--- a/scripts/convert_and_dispatch_genotypes.py
+++ b/scripts/convert_and_dispatch_genotypes.py
@@ -241,8 +241,8 @@ class UploadVcfToSamples(StepEPP):
# This is the first genotyping results
lims_sample.udf[submitted_genotype_udf_number_call] = nb_call
lims_sample.udf[genotype_udf_file_id] = lims_file.id
- elif lims_sample.udf.get(submitted_genotype_udf_number_call) and \
- nb_call > lims_sample.udf.get(submitted_genotype_udf_number_call):
+ elif submitted_genotype_udf_number_call in lims_sample.udf and \
+ nb_call > (lims_sample.udf.get(submitted_genotype_udf_number_call) or 0):
# This genotyping is better than before
lims_sample.udf[submitted_genotype_udf_number_call] = nb_call
lims_sample.udf[genotype_udf_file_id] = lims_file.id
| ConvertGenotype does not overwrite best run = 0
In `convert_and_dispatch_genotypes.py` the overwriting of the best run UDF does not work when the best run is `0`
| EdinburghGenomics/clarity_scripts | diff --git a/tests/test_convert_and_dispatch_genotypes.py b/tests/test_convert_and_dispatch_genotypes.py
index 3d34ed3..020c52f 100644
--- a/tests/test_convert_and_dispatch_genotypes.py
+++ b/tests/test_convert_and_dispatch_genotypes.py
@@ -170,7 +170,7 @@ class TestUploadVcfToSamples(TestEPP):
}
self.lims_sample2.udf = {
'QuantStudio Data Import Completed #': 1,
- 'Number of Calls (Best Run)': 12,
+ 'Number of Calls (Best Run)': 0,
'Genotyping results file id': 'old_file_id'
}
mlims.upload_new_file.return_value = Mock(id='file_id')
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 3,
"test_score": 0
},
"num_modified_files": 1
} | 0.7 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": null,
"python": "3.6",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | asana==0.6.7
attrs==22.2.0
cached-property==1.5.2
certifi==2021.5.30
-e git+https://github.com/EdinburghGenomics/clarity_scripts.git@6fd2d0a0f7fc2f12213daa9f265b8c5a35e3e7ef#egg=clarity_scripts
coverage==6.2
EGCG-Core==0.8.1
importlib-metadata==4.8.3
iniconfig==1.1.1
Jinja2==2.8
MarkupSafe==2.0.1
oauthlib==3.2.2
packaging==21.3
pluggy==1.0.0
py==1.11.0
pyclarity-lims==0.4.8
pyparsing==3.1.4
pytest==7.0.1
pytest-cov==4.0.0
PyYAML==6.0.1
requests==2.14.2
requests-oauthlib==0.8.0
six==1.17.0
tomli==1.2.3
typing_extensions==4.1.1
zipp==3.6.0
| name: clarity_scripts
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- certifi=2021.5.30=py36h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.3=he6710b0_2
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=1.1.1w=h7f8727e_0
- pip=21.2.2=py36h06a4308_0
- python=3.6.13=h12debd9_1
- readline=8.2=h5eee18b_0
- setuptools=58.0.4=py36h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- wheel=0.37.1=pyhd3eb1b0_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- asana==0.6.7
- attrs==22.2.0
- cached-property==1.5.2
- coverage==6.2
- egcg-core==0.8.1
- importlib-metadata==4.8.3
- iniconfig==1.1.1
- jinja2==2.8
- markupsafe==2.0.1
- oauthlib==3.2.2
- packaging==21.3
- pluggy==1.0.0
- py==1.11.0
- pyclarity-lims==0.4.8
- pyparsing==3.1.4
- pytest==7.0.1
- pytest-cov==4.0.0
- pyyaml==6.0.1
- requests==2.14.2
- requests-oauthlib==0.8.0
- six==1.17.0
- tomli==1.2.3
- typing-extensions==4.1.1
- zipp==3.6.0
prefix: /opt/conda/envs/clarity_scripts
| [
"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_second_time"
] | [] | [
"tests/test_convert_and_dispatch_genotypes.py::TestEPP::test_init",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_find_field",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_generate_vcf",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_get_genotype_from_call",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_order_from_fai",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_QuantStudio_AIF_genotype",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_parse_genome_fai",
"tests/test_convert_and_dispatch_genotypes.py::TestGenotypeConversion::test_vcf_header_from_ref_length",
"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_init",
"tests/test_convert_and_dispatch_genotypes.py::TestUploadVcfToSamples::test_upload_first_time"
] | [] | MIT License | 2,653 | 274 | [
"scripts/convert_and_dispatch_genotypes.py"
] |
|
mido__mido-164 | e87384d7e5d62de361a65ab6b1d5d62750475e84 | 2018-06-11 12:41:40 | dd1b42f39678982e887a0bd5b25e104a6859ff5f | diff --git a/mido/frozen.py b/mido/frozen.py
index 2c04f1f..20d629d 100644
--- a/mido/frozen.py
+++ b/mido/frozen.py
@@ -3,10 +3,15 @@ from .midifiles import MetaMessage, UnknownMetaMessage
class Frozen(object):
- def __repr__(self):
- text = super(Frozen, self).__repr__()
+ def __str__(self):
+ text = super(Frozen, self).__str__()
return '<frozen {}'.format(text[1:])
+ def __repr__(self):
+ # canonicalize to mutable objects
+ r = super(Frozen, self).__repr__()
+ return r[len('Frozen'):]
+
def __setattr__(self, *_):
raise ValueError('frozen message is immutable')
diff --git a/mido/messages/messages.py b/mido/messages/messages.py
index f8f7e36..f1387d4 100644
--- a/mido/messages/messages.py
+++ b/mido/messages/messages.py
@@ -54,6 +54,15 @@ class BaseMessage(object):
"""
return cl(**data)
+ def __repr__(self):
+ d = self.dict()
+ msg_type = d.pop('type')
+ items = getattr(d, 'iteritems', d.items)
+ return "%s('%s', %s)" % (
+ type(self).__name__,
+ msg_type,
+ ', '.join('%s=%s' % item for item in items()))
+
@property
def is_realtime(self):
"""True if the message is a system realtime message."""
@@ -162,9 +171,6 @@ class Message(BaseMessage):
def __str__(self):
return msg2str(vars(self))
- def __repr__(self):
- return '<message {}>'.format(str(self))
-
def _setattr(self, name, value):
if name == 'type':
raise AttributeError('type attribute is read only')
diff --git a/mido/midifiles/meta.py b/mido/midifiles/meta.py
index be9552c..8b73da0 100644
--- a/mido/midifiles/meta.py
+++ b/mido/midifiles/meta.py
@@ -535,7 +535,7 @@ class MetaMessage(BaseMessage):
encode_variable_int(len(data)) +
data)
- def __repr__(self):
+ def __str__(self):
spec = _META_SPEC_BY_TYPE[self.type]
attributes = []
for name in spec.attributes:
@@ -561,7 +561,7 @@ class UnknownMetaMessage(MetaMessage):
'data': data,
'time': time})
- def __repr__(self):
+ def __str__(self):
return ('<unknown meta message'
' type_byte=0x{:02x} '
'data={!r} time={}>').format(self.type_byte,
@@ -569,6 +569,11 @@ class UnknownMetaMessage(MetaMessage):
self.time
)
+ def __repr__(self):
+ # fix message type artifact
+ r = super(UnknownMetaMessage, self).__repr__()
+ return r.replace("'unknown_meta', ", '')
+
def __setattr__(self, name, value):
# This doesn't do any checking.
# It probably should.
diff --git a/mido/midifiles/midifiles.py b/mido/midifiles/midifiles.py
index bb885a2..f3db7cb 100644
--- a/mido/midifiles/midifiles.py
+++ b/mido/midifiles/midifiles.py
@@ -293,7 +293,8 @@ class MidiFile(object):
type=1, ticks_per_beat=DEFAULT_TICKS_PER_BEAT,
charset='latin1',
debug=False,
- clip=False
+ clip=False,
+ tracks=None
):
self.filename = filename
@@ -309,7 +310,9 @@ class MidiFile(object):
raise ValueError(
'invalid format {} (must be 0, 1 or 2)'.format(format))
- if file is not None:
+ if tracks is not None:
+ self.tracks = tracks
+ elif file is not None:
self._load(file)
elif self.filename is not None:
with io.open(filename, 'rb') as file:
@@ -461,11 +464,18 @@ class MidiFile(object):
else:
print('{!r}'.format(msg))
- def __repr__(self):
+ def __str__(self):
return '<midi file {!r} type {}, {} tracks, {} messages>'.format(
self.filename, self.type, len(self.tracks),
sum([len(track) for track in self.tracks]))
+ def __repr__(self):
+ tracks_str = ',\n'.join(repr(track) for track in self.tracks)
+ tracks_str = '\n'.join(' ' + line for line in tracks_str.splitlines())
+ tracks_str = (', tracks=[\n%s\n]' % tracks_str) if self.tracks else ''
+ return 'MidiFile(type=%s, ticks_per_beat=%s%s)' % (
+ self.type, self.ticks_per_beat, tracks_str)
+
# The context manager has no purpose but is kept around since it was
# used in examples in the past.
def __enter__(self):
diff --git a/mido/midifiles/tracks.py b/mido/midifiles/tracks.py
index 59ad1df..6140688 100644
--- a/mido/midifiles/tracks.py
+++ b/mido/midifiles/tracks.py
@@ -50,9 +50,16 @@ class MidiTrack(list):
def __mul__(self, other):
return self.__class__(list.__mul__(self, other))
- def __repr__(self):
+ def __str__(self):
return '<midi track {!r} {} messages>'.format(self.name, len(self))
+ def __repr__(self):
+ messages = ''
+ if len(self) > 0:
+ template = '[\n %s]' if len(self) > 1 else '[%s]'
+ messages = template % ',\n '.join(repr(m) for m in self)
+ return 'MidiTrack(%s)' % messages
+
def _to_abstime(messages):
"""Convert messages to absolute time."""
| proper __repr__ for MidiFile, etc.
Per Python docs, `__repr__()` output "should normally look like a valid Python expression that can be used to recreate an object with the same value".
Currently for MidiFile, repr() gives the same result as str(), which is just a summary including number of tracks, etc. Likewise MidiTrack gives a summary with number of messages.
One place a proper repr() would be immediately useful is for tests, e.g. to confirm that code changes don't alter generated MIDI messages, tracks, or files. It would also facilitate viewing differences between MIDI data in text form.
With a proper repr(), it would also be trivial to implement MidiFile `__eq__` (see #150). | mido/mido | diff --git a/tests/messages/test_messages.py b/tests/messages/test_messages.py
index a834fbd..1b076c0 100644
--- a/tests/messages/test_messages.py
+++ b/tests/messages/test_messages.py
@@ -108,3 +108,9 @@ def test_dict_sysex_data():
def test_from_hex_sysex_data_type():
msg = Message.from_hex('F0 01 02 03 F7')
assert isinstance(msg.data, SysexData)
+
+
+def test_repr():
+ msg = Message('note_on', channel=1, note=2, time=3)
+ msg_eval = eval(repr(msg))
+ assert msg == msg_eval
diff --git a/tests/midifiles/test_meta.py b/tests/midifiles/test_meta.py
index 3ea3c71..4ac6145 100644
--- a/tests/midifiles/test_meta.py
+++ b/tests/midifiles/test_meta.py
@@ -1,5 +1,5 @@
import pytest
-from mido.midifiles.meta import MetaMessage, MetaSpec_key_signature, KeySignatureError
+from mido.midifiles.meta import MetaMessage, UnknownMetaMessage, MetaSpec_key_signature, KeySignatureError
def test_copy_invalid_argument():
@@ -30,3 +30,13 @@ class TestKeySignature:
msg = MetaMessage('key_signature')
MetaSpec_key_signature().decode(msg, input_bytes)
assert msg.key == expect_sig
+
+def test_meta_message_repr():
+ msg = MetaMessage('end_of_track', time=10)
+ msg_eval = eval(repr(msg))
+ assert msg == msg_eval
+
+def test_unknown_meta_message_repr():
+ msg = UnknownMetaMessage(type_byte=99, data=[1, 2], time=10)
+ msg_eval = eval(repr(msg))
+ assert msg == msg_eval
diff --git a/tests/midifiles/test_midifiles.py b/tests/midifiles/test_midifiles.py
index 8231e45..a579ed7 100644
--- a/tests/midifiles/test_midifiles.py
+++ b/tests/midifiles/test_midifiles.py
@@ -1,7 +1,7 @@
import io
from pytest import raises
from mido.messages import Message
-from mido.midifiles.midifiles import MidiFile
+from mido.midifiles.midifiles import MidiFile, MidiTrack
from mido.midifiles.meta import MetaMessage, KeySignatureError
HEADER_ONE_TRACK = """
@@ -163,3 +163,19 @@ def test_meta_messages_with_length_0():
MetaMessage('end_of_track'),
]
+
+
+def test_midifile_repr():
+ midifile = MidiFile(type=1, ticks_per_beat=123, tracks=[
+ MidiTrack([
+ Message('note_on', channel=1, note=2, time=3),
+ Message('note_off', channel=1, note=2, time=3)]),
+ MidiTrack([
+ MetaMessage('sequence_number', number=5),
+ Message('note_on', channel=2, note=6, time=9),
+ Message('note_off', channel=2, note=6, time=9)]),
+ ])
+ midifile_eval = eval(repr(midifile))
+ for track, track_eval in zip(midifile.tracks, midifile_eval.tracks):
+ for m1, m2 in zip(track, track_eval):
+ assert m1 == m2
diff --git a/tests/midifiles/test_tracks.py b/tests/midifiles/test_tracks.py
index 6dddcde..54467b8 100644
--- a/tests/midifiles/test_tracks.py
+++ b/tests/midifiles/test_tracks.py
@@ -1,6 +1,10 @@
+import itertools
+from mido.messages import Message
from mido.midifiles.meta import MetaMessage
from mido.midifiles.tracks import MidiTrack
+zip = getattr(itertools, 'izip', zip)
+
def test_track_slice():
track = MidiTrack()
@@ -16,3 +20,13 @@ def test_track_name():
# The track should use the first name it finds.
track = MidiTrack([name1, name2])
assert track.name == name1.name
+
+
+def test_track_repr():
+ track = MidiTrack([
+ Message('note_on', channel=1, note=2, time=3),
+ Message('note_off', channel=1, note=2, time=3),
+ ])
+ track_eval = eval(repr(track))
+ for m1, m2 in zip(track, track_eval):
+ assert m1 == m2
diff --git a/tests/test_frozen.py b/tests/test_frozen.py
index b26f010..0551d2f 100644
--- a/tests/test_frozen.py
+++ b/tests/test_frozen.py
@@ -1,4 +1,5 @@
from mido.messages import Message
+from mido.midifiles.meta import MetaMessage, UnknownMetaMessage
from mido.frozen import (is_frozen, freeze_message, thaw_message,
FrozenMessage, FrozenMetaMessage,
FrozenUnknownMetaMessage)
@@ -27,3 +28,24 @@ def test_thawed_message_is_copy():
def test_is_frozen():
assert is_frozen(FrozenMessage('note_on'))
assert not is_frozen(Message('note_on'))
+
+
+def test_frozen_repr():
+ msg = FrozenMessage('note_on', channel=1, note=2, time=3)
+ msg_eval = eval(repr(msg))
+ assert type(msg_eval) == Message
+ assert msg == msg_eval
+
+
+def test_frozen_meta_repr():
+ msg = FrozenMetaMessage('end_of_track', time=10)
+ msg_eval = eval(repr(msg))
+ assert type(msg_eval) == MetaMessage
+ assert msg == msg_eval
+
+
+def test_frozen_unknown_meta_repr():
+ msg = FrozenUnknownMetaMessage(type_byte=99, data=[1, 2], time=10)
+ msg_eval = eval(repr(msg))
+ assert type(msg_eval) == UnknownMetaMessage
+ assert msg == msg_eval
| {
"commit_name": "head_commit",
"failed_lite_validators": [
"has_issue_reference",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 1
},
"num_modified_files": 5
} | 1.2 | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest",
"check-manifest",
"flake8",
"sphinx"
],
"pre_install": null,
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | alabaster==0.7.16
babel==2.17.0
build==1.2.2.post1
cachetools==5.5.2
certifi==2025.1.31
chardet==5.2.0
charset-normalizer==3.4.1
check-manifest==0.50
colorama==0.4.6
distlib==0.3.9
docutils==0.21.2
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
filelock==3.18.0
flake8==7.2.0
idna==3.10
imagesize==1.4.1
importlib_metadata==8.6.1
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
Jinja2==3.1.6
MarkupSafe==3.0.2
mccabe==0.7.0
-e git+https://github.com/mido/mido.git@e87384d7e5d62de361a65ab6b1d5d62750475e84#egg=mido
packaging @ file:///croot/packaging_1734472117206/work
platformdirs==4.3.7
pluggy @ file:///croot/pluggy_1733169602837/work
pycodestyle==2.13.0
pyflakes==3.3.1
Pygments==2.19.1
pyproject-api==1.9.0
pyproject_hooks==1.2.0
pytest @ file:///croot/pytest_1738938843180/work
requests==2.32.3
snowballstemmer==2.2.0
Sphinx==7.4.7
sphinxcontrib-applehelp==2.0.0
sphinxcontrib-devhelp==2.0.0
sphinxcontrib-htmlhelp==2.1.0
sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==2.0.0
sphinxcontrib-serializinghtml==2.0.0
tomli==2.2.1
tox==4.25.0
typing_extensions==4.13.0
urllib3==2.3.0
virtualenv==20.29.3
zipp==3.21.0
| name: mido
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- alabaster==0.7.16
- babel==2.17.0
- build==1.2.2.post1
- cachetools==5.5.2
- certifi==2025.1.31
- chardet==5.2.0
- charset-normalizer==3.4.1
- check-manifest==0.50
- colorama==0.4.6
- distlib==0.3.9
- docutils==0.21.2
- filelock==3.18.0
- flake8==7.2.0
- idna==3.10
- imagesize==1.4.1
- importlib-metadata==8.6.1
- jinja2==3.1.6
- markupsafe==3.0.2
- mccabe==0.7.0
- platformdirs==4.3.7
- pycodestyle==2.13.0
- pyflakes==3.3.1
- pygments==2.19.1
- pyproject-api==1.9.0
- pyproject-hooks==1.2.0
- requests==2.32.3
- snowballstemmer==2.2.0
- sphinx==7.4.7
- sphinxcontrib-applehelp==2.0.0
- sphinxcontrib-devhelp==2.0.0
- sphinxcontrib-htmlhelp==2.1.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==2.0.0
- sphinxcontrib-serializinghtml==2.0.0
- tomli==2.2.1
- tox==4.25.0
- typing-extensions==4.13.0
- urllib3==2.3.0
- virtualenv==20.29.3
- zipp==3.21.0
prefix: /opt/conda/envs/mido
| [
"tests/messages/test_messages.py::test_repr",
"tests/midifiles/test_meta.py::test_meta_message_repr",
"tests/midifiles/test_meta.py::test_unknown_meta_message_repr",
"tests/midifiles/test_midifiles.py::test_midifile_repr",
"tests/midifiles/test_tracks.py::test_track_repr",
"tests/test_frozen.py::test_frozen_repr",
"tests/test_frozen.py::test_frozen_meta_repr",
"tests/test_frozen.py::test_frozen_unknown_meta_repr"
] | [] | [
"tests/messages/test_messages.py::test_msg_time_equality",
"tests/messages/test_messages.py::test_set_type",
"tests/messages/test_messages.py::test_encode_pitchwheel",
"tests/messages/test_messages.py::test_decode_pitchwheel",
"tests/messages/test_messages.py::test_encode_songpos",
"tests/messages/test_messages.py::test_decode_songpos",
"tests/messages/test_messages.py::test_sysex_data_is_sysexdata_object",
"tests/messages/test_messages.py::test_sysex_data_accepts_different_types",
"tests/messages/test_messages.py::test_copy",
"tests/messages/test_messages.py::test_init_invalid_argument",
"tests/messages/test_messages.py::test_copy_invalid_argument",
"tests/messages/test_messages.py::test_copy_cant_change_type",
"tests/messages/test_messages.py::test_copy_can_have_same_type",
"tests/messages/test_messages.py::test_copy_handles_data_generator",
"tests/messages/test_messages.py::test_compare_with_nonmessage",
"tests/messages/test_messages.py::test_from_dict_default_values",
"tests/messages/test_messages.py::test_dict_sysex_data",
"tests/messages/test_messages.py::test_from_hex_sysex_data_type",
"tests/midifiles/test_meta.py::test_copy_invalid_argument",
"tests/midifiles/test_meta.py::test_copy_cant_override_type",
"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig0]",
"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig1]",
"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig2]",
"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig3]",
"tests/midifiles/test_meta.py::TestKeySignature::test_bad_key_sig_throws_key_signature_error[bad_key_sig4]",
"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes0-C]",
"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes1-Am]",
"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes2-Cb]",
"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes3-Abm]",
"tests/midifiles/test_meta.py::TestKeySignature::test_key_signature[input_bytes4-A#m]",
"tests/midifiles/test_midifiles.py::test_no_tracks",
"tests/midifiles/test_midifiles.py::test_single_message",
"tests/midifiles/test_midifiles.py::test_too_long_message",
"tests/midifiles/test_midifiles.py::test_two_tracks",
"tests/midifiles/test_midifiles.py::test_empty_file",
"tests/midifiles/test_midifiles.py::test_eof_in_track",
"tests/midifiles/test_midifiles.py::test_invalid_data_byte_no_clipping",
"tests/midifiles/test_midifiles.py::test_invalid_data_byte_with_clipping_high",
"tests/midifiles/test_midifiles.py::test_meta_messages",
"tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_sharps",
"tests/midifiles/test_midifiles.py::test_meta_message_bad_key_sig_throws_key_signature_error_flats",
"tests/midifiles/test_midifiles.py::test_meta_messages_with_length_0",
"tests/midifiles/test_tracks.py::test_track_slice",
"tests/midifiles/test_tracks.py::test_track_name",
"tests/test_frozen.py::test_hashability",
"tests/test_frozen.py::test_freeze_and_thaw",
"tests/test_frozen.py::test_thawed_message_is_copy",
"tests/test_frozen.py::test_is_frozen"
] | [] | MIT License | 2,654 | 1,516 | [
"mido/frozen.py",
"mido/messages/messages.py",
"mido/midifiles/meta.py",
"mido/midifiles/midifiles.py",
"mido/midifiles/tracks.py"
] |
|
google__docker-explorer-31 | 80376ae9503280241d6c14838a69d026f0987da9 | 2018-06-12 12:02:11 | 80376ae9503280241d6c14838a69d026f0987da9 | diff --git a/docker_explorer/de.py b/docker_explorer/de.py
index ea8afa3..1e78818 100644
--- a/docker_explorer/de.py
+++ b/docker_explorer/de.py
@@ -24,7 +24,6 @@ import os
from docker_explorer import errors
from docker_explorer.lib import container
-from docker_explorer.lib import storage
from docker_explorer.lib import utils
@@ -34,8 +33,6 @@ class DockerExplorer(object):
Attributes:
docker_directory (str): Path to use as the root of the Docker runtime.
Default is '/var/lib/docker'.
- storage_object (lib.Storage): The object implementing the methods for
- exploring the Docker containers.
"""
def __init__(self):
@@ -45,16 +42,12 @@ class DockerExplorer(object):
self.containers_directory = None
self.docker_directory = None
self.docker_version = 2
- self.storage_object = None
def _SetDockerDirectory(self, docker_path):
"""Sets the Docker main directory.
Args:
docker_path(str): the absolute path to the docker directory.
-
- Raises:
- errors.BadStorageException: If the storage backend couldn't be detected.
"""
self.docker_directory = docker_path
if not os.path.isdir(self.docker_directory):
@@ -67,29 +60,6 @@ class DockerExplorer(object):
self.containers_directory = os.path.join(
self.docker_directory, 'containers')
- if os.path.isfile(
- os.path.join(self.docker_directory, 'repositories-aufs')):
- # TODO: check this agains other storages in version 1.9 and below
- self.docker_version = 1
- self.storage_object = storage.AufsStorage(
- docker_directory=self.docker_directory, docker_version=1)
- elif os.path.isdir(os.path.join(self.docker_directory, 'overlay2')):
- self.storage_object = storage.Overlay2Storage(
- docker_directory=self.docker_directory)
- elif os.path.isdir(os.path.join(self.docker_directory, 'overlay')):
- self.storage_object = storage.OverlayStorage(
- docker_directory=self.docker_directory)
- elif os.path.isdir(os.path.join(self.docker_directory, 'aufs')):
- self.storage_object = storage.AufsStorage(
- docker_directory=self.docker_directory)
- if self.storage_object is None:
- err_message = (
- 'Could not detect storage system. '
- 'Make sure the docker directory ({0:s}) is correct. '
- 'If it is correct, you might want to run this script'
- ' with higher privileges.'.format(self.docker_directory))
- raise errors.BadStorageException(err_message)
-
def AddBasicOptions(self, argument_parser):
"""Adds the global options to the argument_parser.
@@ -218,7 +188,7 @@ class DockerExplorer(object):
mountpoint (str): the path to the destination mount point.
"""
container_object = self.GetContainer(container_id)
- self.storage_object.Mount(container_object, mountpoint)
+ container_object.Mount(mountpoint)
def GetContainersString(self, only_running=False):
"""Returns a string describing the running containers.
@@ -260,10 +230,6 @@ class DockerExplorer(object):
"""
print(self.GetContainersString(only_running=only_running))
- def ShowRepositories(self):
- """Displays information about the images in the Docker repository."""
- print(self.storage_object.ShowRepositories())
-
def ShowHistory(self, container_id, show_empty_layers=False):
"""Prints the modification history of a container.
@@ -274,6 +240,33 @@ class DockerExplorer(object):
container_object = self.GetContainer(container_id)
print(container_object.GetHistory(show_empty_layers))
+ def GetRepositoriesString(self):
+ """Returns information about images in the local Docker repositories.
+
+ Returns:
+ str: human readable list of images in local Docker repositories.
+ """
+ result_string = ''
+ repositories = []
+ if self.docker_version == 1:
+ repositories = [os.path.join(self.docker_directory, 'repositories-aufs')]
+ else:
+ image_path = os.path.join(self.docker_directory, 'image')
+ for storage_method in os.listdir(image_path):
+ repositories_file_path = os.path.join(
+ image_path, storage_method, 'repositories.json')
+ if os.path.isfile(repositories_file_path):
+ repositories.append(repositories_file_path)
+
+ for repositories_file_path in repositories:
+ result_string += (
+ 'Listing repositories from file {0:s}\n'.format(
+ repositories_file_path))
+ with open(repositories_file_path) as rf:
+ result_string += utils.PrettyPrintJSON(rf.read())
+
+ return result_string
+
def Main(self):
"""The main method for the DockerExplorer class.
@@ -287,7 +280,6 @@ class DockerExplorer(object):
self._SetDockerDirectory(self.docker_directory)
-
if options.command == 'mount':
self.Mount(options.container_id, options.mountpoint)
@@ -301,7 +293,7 @@ class DockerExplorer(object):
elif options.what == 'running_containers':
self.ShowContainers(only_running=True)
elif options.what == 'repositories':
- self.ShowRepositories()
+ print(self.GetRepositoriesString())
else:
raise ValueError('Unhandled command %s' % options.command)
diff --git a/docker_explorer/lib/container.py b/docker_explorer/lib/container.py
index 4321385..04cec32 100644
--- a/docker_explorer/lib/container.py
+++ b/docker_explorer/lib/container.py
@@ -18,8 +18,10 @@ from __future__ import print_function, unicode_literals
import json
import os
+import subprocess
from docker_explorer import errors
+from docker_explorer.lib import storage
from docker_explorer.lib import utils
@@ -38,11 +40,18 @@ class Container(object):
name (str): the name of the container.
running (boolean): True if the container is running.
start_timestamp (str): the container's start timestamp.
- storage_driver (str): the container's storage driver.
+ storage_name (str): the container's storage driver name.
+ storage_object (BaseStorage): the container's storage backend object.
volumes (list(tuple)): list of mount points to bind from host to the
container. (Docker storage backend v1).
"""
+ STORAGES_MAP = {
+ 'aufs': storage.AufsStorage,
+ 'overlay': storage.OverlayStorage,
+ 'overlay2': storage.Overlay2Storage
+ }
+
def __init__(self, docker_directory, container_id, docker_version=2):
"""Initializes the Container class.
@@ -88,16 +97,18 @@ class Container(object):
if json_state:
self.running = json_state.get('Running', False)
self.start_timestamp = json_state.get('StartedAt', False)
- self.storage_driver = container_info_dict.get('Driver', None)
- if self.storage_driver is None:
+ self.storage_name = container_info_dict.get('Driver', None)
+ if self.storage_name is None:
raise errors.BadContainerException(
'{0} container config file lacks Driver key'.format(
container_info_json_path))
+
+ self._SetStorage(self.storage_name)
self.volumes = container_info_dict.get('Volumes', None)
if self.docker_version == 2:
c_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'layerdb',
+ self.docker_directory, 'image', self.storage_name, 'layerdb',
'mounts', container_id)
with open(os.path.join(c_path, 'mount-id')) as mount_id_file:
self.mount_id = mount_id_file.read()
@@ -131,7 +142,7 @@ class Container(object):
elif self.docker_version == 2:
hash_method, layer_id = layer_id.split(':')
layer_info_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'imagedb',
+ self.docker_directory, 'image', self.storage_name, 'imagedb',
'content', hash_method, layer_id)
if os.path.isfile(layer_info_path):
with open(layer_info_path) as layer_info_file:
@@ -162,7 +173,7 @@ class Container(object):
elif self.docker_version == 2:
hash_method, layer_id = current_layer.split(':')
parent_layer_path = os.path.join(
- self.docker_directory, 'image', self.storage_driver, 'imagedb',
+ self.docker_directory, 'image', self.storage_name, 'imagedb',
'metadata', hash_method, layer_id, 'parent')
if not os.path.isfile(parent_layer_path):
break
@@ -204,3 +215,41 @@ class Container(object):
else:
history_str += 'Empty layer'
return history_str
+
+ def _SetStorage(self, storage_name):
+ """Sets the storage_object attribute.
+
+ Args:
+ storage_name (str): the name of the storage.
+ Returns:
+ BaseStorage: a storage object.
+ Raises:
+ BadContainerException: if no storage Driver is defined, or if it is not
+ implemented
+ """
+ storage_class = self.STORAGES_MAP.get(storage_name, None)
+
+ if storage_class is None:
+ raise errors.BadContainerException(
+ 'Storage driver {0} is not implemented'.format(storage_name))
+
+ self.storage_object = storage_class(
+ self.docker_directory, self.docker_version)
+
+ def Mount(self, mount_dir):
+ """Mounts the specified container's filesystem.
+
+ Args:
+ mount_dir (str): the path to the destination mount point
+ """
+
+ commands = self.storage_object.MakeMountCommands(self, mount_dir)
+ for c in commands:
+ print(c)
+ print('Do you want to mount this container ID: {0:s} on {1:s} ?\n'
+ '(ie: run these commands) [Y/n]'.format(self.container_id, mount_dir))
+ choice = raw_input().lower()
+ if not choice in ['y', 'yes', '']:
+ for c in commands:
+ # TODO() this is quite unsafe, need to properly split args
+ subprocess.call(c, shell=True)
diff --git a/docker_explorer/lib/storage.py b/docker_explorer/lib/storage.py
index 1735fa2..2cc96c1 100644
--- a/docker_explorer/lib/storage.py
+++ b/docker_explorer/lib/storage.py
@@ -17,11 +17,8 @@
from __future__ import print_function, unicode_literals
import os
-import subprocess
import sys
-from docker_explorer.lib import utils
-
class BaseStorage(object):
"""This class provides tools to list and access containers metadata.
@@ -51,24 +48,6 @@ class BaseStorage(object):
if self.docker_version == 1:
self.container_config_filename = 'config.json'
- def ShowRepositories(self):
- """Returns information about the images in the Docker repository.
-
- Returns:
- str: human readable information about image repositories.
- """
- repositories_file_path = os.path.join(
- self.docker_directory, 'image', self.STORAGE_METHOD,
- 'repositories.json')
- if self.docker_version == 1:
- repositories_file_path = os.path.join(
- self.docker_directory, 'repositories-aufs')
- result_string = (
- 'Listing repositories from file {0:s}').format(repositories_file_path)
- with open(repositories_file_path) as rf:
- repositories_string = rf.read()
- return result_string + utils.PrettyPrintJSON(repositories_string)
-
def MakeMountCommands(self, container_object, mount_dir):
"""Generates the required shell commands to mount a container given its ID.
@@ -123,25 +102,6 @@ class BaseStorage(object):
return extra_commands
- def Mount(self, container_object, mount_dir):
- """Mounts the specified container's filesystem.
-
- Args:
- container_object (Container): the container.
- mount_dir (str): the path to the destination mount point
- """
-
- commands = self.MakeMountCommands(container_object, mount_dir)
- for c in commands:
- print(c)
- print('Do you want to mount this container Id: {0:s} on {1:s} ?\n'
- '(ie: run these commands) [Y/n]'.format(
- container_object.container_id, mount_dir))
- choice = raw_input().lower()
- if not choice or choice == 'y' or choice == 'yes':
- for c in commands:
- # TODO(romaing) this is quite unsafe, need to properly split args
- subprocess.call(c, shell=True)
class AufsStorage(BaseStorage):
"""This class implements AuFS storage specific methods."""
diff --git a/docker_explorer/lib/utils.py b/docker_explorer/lib/utils.py
index b4e9db3..cdfb2b8 100644
--- a/docker_explorer/lib/utils.py
+++ b/docker_explorer/lib/utils.py
@@ -44,5 +44,6 @@ def PrettyPrintJSON(string):
Returns:
str: pretty printed JSON string.
"""
- return json.dumps(
+ pretty_json = json.dumps(
json.loads(string), sort_keys=True, indent=4, separators=(', ', ': '))
+ return pretty_json + '\n'
| Better detect overlay/overlay2 layouts
When processing an image that has both and "overlay2" and "overlay" folder it will take the "overlay2" folder by default even though there is no 'mounts' subfolder. | google/docker-explorer | diff --git a/tests.py b/tests.py
index f7a79f4..99ca551 100644
--- a/tests.py
+++ b/tests.py
@@ -47,7 +47,7 @@ class UtilsTests(unittest.TestCase):
test_json = json.dumps(test_dict)
expected_string = ('{\n "test": [\n {\n "dict1": {\n'
' "key1": "val1"\n }, \n'
- ' "dict2": null\n }\n ]\n}')
+ ' "dict2": null\n }\n ]\n}\n')
self.assertEqual(expected_string, utils.PrettyPrintJSON(test_json))
@@ -89,8 +89,8 @@ class TestDEMain(unittest.TestCase):
self.assertEqual(expected_error_message, err.exception.message)
-class StorageTestCase(unittest.TestCase):
- """Base class for tests of different BaseStorage implementations."""
+class DockerTestCase(unittest.TestCase):
+ """Base class for tests of different Storage implementations."""
@classmethod
def tearDownClass(cls):
@@ -98,7 +98,7 @@ class StorageTestCase(unittest.TestCase):
@classmethod
def _setup(cls, driver, driver_class):
- """Internal method to set up the TestCase on a specific storate."""
+ """Internal method to set up the TestCase on a specific storage."""
cls.driver = driver
docker_directory_path = os.path.join('test_data', 'docker')
if not os.path.isdir(docker_directory_path):
@@ -113,17 +113,17 @@ class StorageTestCase(unittest.TestCase):
def testDetectStorage(self):
"""Tests the DockerExplorer.DetectStorage function."""
- storage_object = self.de_object.storage_object
- self.assertIsNotNone(storage_object)
- self.assertIsInstance(storage_object, self.driver_class)
- self.assertEqual(storage_object.STORAGE_METHOD, self.driver)
+ for container_obj in self.de_object.GetAllContainers():
+ self.assertIsNotNone(container_obj.storage_object)
+ self.assertEqual(container_obj.storage_name, self.driver)
+ self.assertIsInstance(container_obj.storage_object, self.driver_class)
- self.assertEqual(2, storage_object.docker_version)
- self.assertEqual('config.v2.json',
- self.de_object.container_config_filename)
+ self.assertEqual(2, container_obj.docker_version)
+ self.assertEqual(
+ 'config.v2.json', container_obj.container_config_filename)
-class TestAufsStorage(StorageTestCase):
+class TestAufsStorage(DockerTestCase):
"""Tests methods in the BaseStorage object."""
@classmethod
@@ -198,12 +198,13 @@ class TestAufsStorage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a AUFS storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a AUFS storage."""
+ self.maxDiff = None
+ result_string = self.de_object.GetRepositoriesString()
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/aufs/repositories.json{\n'
+ 'test_data/docker/image/aufs/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": '
@@ -211,7 +212,7 @@ class TestAufsStorage(StorageTestCase):
'68"\n'
' }\n'
' }\n'
- '}')
+ '}\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -219,7 +220,7 @@ class TestAufsStorage(StorageTestCase):
container_id = (
'7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [
('mount -t aufs -o ro,br=test_data/docker/aufs/diff/test_data/docker/'
@@ -253,7 +254,7 @@ class TestAufsStorage(StorageTestCase):
self.assertEqual(expected_string, container_obj.GetHistory())
-class TestOverlayStorage(StorageTestCase):
+class TestOverlayStorage(DockerTestCase):
"""Tests methods in the OverlayStorage object."""
@classmethod
@@ -329,13 +330,13 @@ class TestOverlayStorage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a Overlay storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a Overlay storage."""
+ result_string = self.de_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/overlay/repositories.json{\n'
+ 'test_data/docker/image/overlay/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
@@ -346,7 +347,7 @@ class TestOverlayStorage(StorageTestCase):
'2c3"\n'
' }\n'
' }\n'
- '}')
+ '}\n')
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -354,7 +355,7 @@ class TestOverlayStorage(StorageTestCase):
container_id = (
'5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [(
'mount -t overlay overlay -o ro,lowerdir='
@@ -381,7 +382,7 @@ class TestOverlayStorage(StorageTestCase):
self.assertEqual(expected_string, container_obj.GetHistory())
-class TestOverlay2Storage(StorageTestCase):
+class TestOverlay2Storage(DockerTestCase):
"""Tests methods in the Overlay2Storage object."""
@classmethod
@@ -457,13 +458,13 @@ class TestOverlay2Storage(StorageTestCase):
self.assertEqual(['/bin/sh', '-c', '#(nop) ', 'CMD ["sh"]'],
layer_info['container_config']['Cmd'])
- def testShowRepositories(self):
- """Tests the BaseStorage.ShowRepositories function on a Overlay2 storage."""
- result_string = self.de_object.storage_object.ShowRepositories()
+ def testGetRepositoriesString(self):
+ """Tests BaseStorage.GetRepositoriesString() on a Overlay2 storage."""
+ result_string = self.de_object.GetRepositoriesString()
self.maxDiff = None
expected_string = (
'Listing repositories from file '
- 'test_data/docker/image/overlay2/repositories.json{\n'
+ 'test_data/docker/image/overlay2/repositories.json\n{\n'
' "Repositories": {\n'
' "busybox": {\n'
' "busybox:latest": "sha256:'
@@ -474,7 +475,14 @@ class TestOverlay2Storage(StorageTestCase):
'c7"\n'
' }\n'
' }\n'
- '}')
+ '}\n'
+ 'Listing repositories from file '
+ 'test_data/docker/image/overlay/repositories.json\n'
+ '{\n'
+ ' "Repositories": {}\n'
+ '}\n'
+
+ )
self.assertEqual(expected_string, result_string)
def testMakeMountCommands(self):
@@ -483,7 +491,7 @@ class TestOverlay2Storage(StorageTestCase):
container_id = (
'8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')
container_obj = self.de_object.GetContainer(container_id)
- commands = self.de_object.storage_object.MakeMountCommands(
+ commands = container_obj.storage_object.MakeMountCommands(
container_obj, '/mnt')
expected_commands = [(
'mount -t overlay overlay -o ro,lowerdir='
@@ -511,7 +519,7 @@ class TestOverlay2Storage(StorageTestCase):
'with command : /bin/sh -c #(nop) CMD ["sh"]')
self.assertEqual(expected_string, container_obj.GetHistory(container_obj))
-del StorageTestCase
+del DockerTestCase
if __name__ == '__main__':
unittest.main()
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 1,
"test_score": 0
},
"num_modified_files": 4
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "pytest",
"pip_packages": [
"pytest"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": null,
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | -e git+https://github.com/google/docker-explorer.git@80376ae9503280241d6c14838a69d026f0987da9#egg=docker_explorer
exceptiongroup @ file:///croot/exceptiongroup_1706031385326/work
iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
packaging @ file:///croot/packaging_1734472117206/work
pluggy @ file:///croot/pluggy_1733169602837/work
pytest @ file:///croot/pytest_1738938843180/work
tomli @ file:///opt/conda/conda-bld/tomli_1657175507142/work
| name: docker-explorer
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- exceptiongroup=1.2.0=py39h06a4308_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- packaging=24.2=py39h06a4308_0
- pip=25.0=py39h06a4308_0
- pluggy=1.5.0=py39h06a4308_0
- pytest=8.3.4=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tomli=2.0.1=py39h06a4308_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
prefix: /opt/conda/envs/docker-explorer
| [
"tests.py::UtilsTests::testPrettyPrintJSON",
"tests.py::TestAufsStorage::testDetectStorage",
"tests.py::TestAufsStorage::testGetRepositoriesString",
"tests.py::TestAufsStorage::testMakeMountCommands",
"tests.py::TestOverlayStorage::testDetectStorage",
"tests.py::TestOverlayStorage::testGetRepositoriesString",
"tests.py::TestOverlayStorage::testMakeMountCommands",
"tests.py::TestOverlay2Storage::testDetectStorage",
"tests.py::TestOverlay2Storage::testMakeMountCommands"
] | [
"tests.py::TestOverlay2Storage::testGetRepositoriesString"
] | [
"tests.py::UtilsTests::testFormatDatetime",
"tests.py::TestDEMain::testDetectStorageFail",
"tests.py::TestDEMain::testParseArguments",
"tests.py::TestAufsStorage::testGetAllContainers",
"tests.py::TestAufsStorage::testGetContainersString",
"tests.py::TestAufsStorage::testGetHistory",
"tests.py::TestAufsStorage::testGetLayerInfo",
"tests.py::TestAufsStorage::testGetOrderedLayers",
"tests.py::TestAufsStorage::testGetRunningContainersList",
"tests.py::TestOverlayStorage::testGetAllContainers",
"tests.py::TestOverlayStorage::testGetContainersString",
"tests.py::TestOverlayStorage::testGetHistory",
"tests.py::TestOverlayStorage::testGetLayerInfo",
"tests.py::TestOverlayStorage::testGetOrderedLayers",
"tests.py::TestOverlayStorage::testGetRunningContainersList",
"tests.py::TestOverlay2Storage::testGetAllContainers",
"tests.py::TestOverlay2Storage::testGetContainersString",
"tests.py::TestOverlay2Storage::testGetHistory",
"tests.py::TestOverlay2Storage::testGetLayerInfo",
"tests.py::TestOverlay2Storage::testGetOrderedLayers",
"tests.py::TestOverlay2Storage::testGetRunningContainersList"
] | [] | Apache License 2.0 | 2,657 | 3,079 | [
"docker_explorer/de.py",
"docker_explorer/lib/container.py",
"docker_explorer/lib/storage.py",
"docker_explorer/lib/utils.py"
] |
|
AmiiThinks__driving_gridworld-13 | fbc47c68cfade4e7d95ba59a3990dfef196389a6 | 2018-06-12 21:08:06 | fbc47c68cfade4e7d95ba59a3990dfef196389a6 | diff --git a/driving_gridworld/road.py b/driving_gridworld/road.py
index cb519ef..559362f 100644
--- a/driving_gridworld/road.py
+++ b/driving_gridworld/road.py
@@ -142,13 +142,12 @@ def combinations(iterable, r, collection=tuple):
class Road(object):
- def __init__(self, num_rows, car, obstacles, speed_limit):
- if speed_limit < car.speed:
+ def __init__(self, num_rows, car, obstacles):
+ if num_rows + 1 < car.speed:
raise ValueError("Car's speed above speed limit!")
self._num_rows = num_rows
self._num_columns = 4
self._car = car
- self._speed_limit = speed_limit
self._obstacles = obstacles
self._available_spaces = {}
for pos in product(range(0, self._car.speed), range(4)):
@@ -159,6 +158,20 @@ class Road(object):
if disallowed_position in self._available_spaces:
del self._available_spaces[disallowed_position]
+ def speed_limit(self):
+ '''The hard speed limit on this road.
+
+ Taking the `UP` action when traveling at the speed limit has no effect.
+
+ Set according to the headlight range since overdriving the
+ headlights too much breaks the physical plausibility of the game
+ due to the way we reusing obstacles to simulate arbitrarily long
+ roads with many obstacles. This is not too much of a restriction
+ though because even overdriving the headlights by one unit is
+ completely unsafe.
+ '''
+ return self._num_rows + 1
+
def obstacle_outside_car_path(self, obstacle):
return (obstacle.col < 0 or obstacle.col >= self._num_columns
or obstacle.row >= self._num_rows)
@@ -198,7 +211,7 @@ class Road(object):
state. The reward function is deterministic.
'''
- next_car = self._car.next(action, self._speed_limit)
+ next_car = self._car.next(action, self.speed_limit())
for positions, reveal_indices in (
self.every_combination_of_revealed_obstacles()):
@@ -225,8 +238,7 @@ class Road(object):
reward += self._car.reward()
if self._car.col == 0 or self._car.col == 3:
reward -= 4 * self._car.speed
- next_road = self.__class__(self._num_rows, next_car,
- next_obstacles, self._speed_limit)
+ next_road = self.__class__(self._num_rows, next_car, next_obstacles)
yield (next_road, prob, reward)
def to_key(self, show_walls=False):
| Enforce a hard limit on the speed limit in `Road` to the number of rows + 1
If the speed limit is larger than this, then the physical plausibility of the similar breaks, because the number of possible obstacle encounters across a fixed distance can depend on the car's speed and the range of its headlights (the number of rows). | AmiiThinks/driving_gridworld | diff --git a/test/road_test.py b/test/road_test.py
index ae22a47..d8aeb36 100644
--- a/test/road_test.py
+++ b/test/road_test.py
@@ -9,9 +9,8 @@ import pytest
def test_transition_probs_without_obstacles_are_always_1():
num_rows = 4
obstacles = []
- speed_limit = 1
car_inst = Car(0, 0, 1)
- road_test = Road(num_rows, car_inst, obstacles, speed_limit)
+ road_test = Road(num_rows, car_inst, obstacles)
for a in ACTIONS:
for next_state, prob, reward in road_test.successors(a):
@@ -21,9 +20,7 @@ def test_transition_probs_without_obstacles_are_always_1():
@pytest.mark.parametrize("obst", [Bump(0, 0), Pedestrian(0, 0)])
def test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road(obst):
num_rows = 2
- speed_limit = 1
-
- road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)
+ road_test = Road(num_rows, Car(1, 1, 1), [obst])
patient = [
(positions, reveal_indices)
for positions, reveal_indices in
@@ -36,9 +33,7 @@ def test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_
@pytest.mark.parametrize("action", ACTIONS)
def test_transition_probs_with_one_obstacle_are_1(obst, action):
num_rows = 2
- speed_limit = 1
-
- road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)
+ road_test = Road(num_rows, Car(1, 1, 1), [obst])
probs = [
prob
for next_state, prob, reward in road_test.successors(action)
@@ -50,9 +45,7 @@ def test_transition_probs_with_one_obstacle_are_1(obst, action):
@pytest.mark.parametrize("action", ACTIONS)
def test_transition_probs_with_invisible_obstacle(obst, action):
num_rows = 2
- speed_limit = 1
-
- road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit)
+ road_test = Road(num_rows, Car(1, 1, 1), [obst])
probs = [
prob
for next_state, prob, reward in road_test.successors(action)
@@ -72,9 +65,8 @@ def test_transition_probs_with_invisible_obstacle(obst, action):
def test_driving_faster_gives_a_larger_reward(action, current_speed):
num_rows = 4
obstacles = []
- speed_limit = 4
car = Car(0, 1, current_speed)
- road_test = Road(num_rows, car, obstacles, speed_limit)
+ road_test = Road(num_rows, car, obstacles)
for next_state, prob, reward in road_test.successors(action):
assert reward == float(current_speed)
@@ -82,12 +74,10 @@ def test_driving_faster_gives_a_larger_reward(action, current_speed):
def test_road_cannot_start_with_car_going_faster_than_speed_limit():
num_rows = 4
obstacles = []
- speed_limit = 1
- current_speed = 2
+ current_speed = 6
car = Car(0, 0, current_speed)
-
with pytest.raises(ValueError):
- road_test = Road(num_rows, car, obstacles, speed_limit)
+ road_test = Road(num_rows, car, obstacles)
@pytest.mark.parametrize("car", [Car(0, 0, 1), Car(0, 3, 1)])
@@ -95,20 +85,28 @@ def test_road_cannot_start_with_car_going_faster_than_speed_limit():
def test_receive_negative_reward_for_driving_off_the_road(car, action):
num_rows = 4
obstacles = []
- speed_limit = 2
- road_test = Road(num_rows, car, obstacles, speed_limit)
+ road_test = Road(num_rows, car, obstacles)
for next_state, prob, reward in road_test.successors(action):
assert reward < 0
+
+
@pytest.mark.parametrize("obst", [Bump(-1, -1), Pedestrian(0, -1)])
@pytest.mark.parametrize("action", ACTIONS)
@pytest.mark.parametrize("speed", [1, 2, 3])
def test_number_of_successors_invisible_obstacle_and_variable_speeds(
obst, action, speed):
num_rows = 2
- speed_limit = 3
- road_test = Road(num_rows, Car(1, 1, speed), [obst], speed_limit)
+ road_test = Road(num_rows, Car(1, 1, speed), [obst])
probs = [
prob
for next_state, prob, reward in road_test.successors(action)
]
assert len(probs) == 4 * speed + 1
+
+
+def test_speed_limit_equals_number_of_rows_plus_one():
+ num_rows = 2
+ obstacles = []
+ car = Car(0, 0, 1)
+ road_test = Road(num_rows, car, obstacles)
+ assert road_test.speed_limit() == num_rows + 1
| {
"commit_name": "merge_commit",
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false,
"llm_score": {
"difficulty_score": 1,
"issue_text_score": 0,
"test_score": 0
},
"num_modified_files": 1
} | unknown | {
"env_vars": null,
"env_yml_path": null,
"install": "pip install -e .[dev]",
"log_parser": "parse_log_pytest",
"no_use_env": null,
"packages": "requirements.txt",
"pip_packages": [
"pytest",
"pytest-cov"
],
"pre_install": [
"apt-get update",
"apt-get install -y gcc"
],
"python": "3.9",
"reqs_path": [
"requirements.txt"
],
"test_cmd": "pytest --no-header -rA --tb=line --color=no -p no:cacheprovider -W ignore::DeprecationWarning"
} | coverage==7.8.0
-e git+https://github.com/AmiiThinks/driving_gridworld.git@fbc47c68cfade4e7d95ba59a3990dfef196389a6#egg=driving_gridworld
exceptiongroup==1.2.2
fire==0.7.0
future==0.15.2
iniconfig==2.1.0
numpy==2.0.2
packaging==24.2
pluggy==1.5.0
pycolab==1.2
pytest==8.3.5
pytest-cov==6.0.0
six==1.17.0
termcolor==3.0.0
tomli==2.2.1
| name: driving_gridworld
channels:
- defaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
- conda-forge
dependencies:
- _libgcc_mutex=0.1=main
- _openmp_mutex=5.1=1_gnu
- ca-certificates=2025.2.25=h06a4308_0
- ld_impl_linux-64=2.40=h12ee557_0
- libffi=3.4.4=h6a678d5_1
- libgcc-ng=11.2.0=h1234567_1
- libgomp=11.2.0=h1234567_1
- libstdcxx-ng=11.2.0=h1234567_1
- ncurses=6.4=h6a678d5_0
- openssl=3.0.16=h5eee18b_0
- pip=25.0=py39h06a4308_0
- python=3.9.21=he870216_1
- readline=8.2=h5eee18b_0
- setuptools=75.8.0=py39h06a4308_0
- sqlite=3.45.3=h5eee18b_0
- tk=8.6.14=h39e8969_0
- tzdata=2025a=h04d1e81_0
- wheel=0.45.1=py39h06a4308_0
- xz=5.6.4=h5eee18b_1
- zlib=1.2.13=h5eee18b_1
- pip:
- coverage==7.8.0
- exceptiongroup==1.2.2
- fire==0.7.0
- future==0.15.2
- iniconfig==2.1.0
- numpy==2.0.2
- packaging==24.2
- pluggy==1.5.0
- pycolab==1.2
- pytest==8.3.5
- pytest-cov==6.0.0
- six==1.17.0
- termcolor==3.0.0
- tomli==2.2.1
prefix: /opt/conda/envs/driving_gridworld
| [
"test/road_test.py::test_transition_probs_without_obstacles_are_always_1",
"test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst0]",
"test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst1]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst0]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst1]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst0]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst1]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst0]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst1]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst0]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst1]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst0]",
"test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst1]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst0]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst1]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst0]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst1]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst0]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst1]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst0]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst1]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst0]",
"test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst1]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[1-0]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[1-1]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[1-2]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[1-3]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[1-4]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[2-0]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[2-1]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[2-2]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[2-3]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[2-4]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[3-0]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[3-1]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[3-2]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[3-3]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[3-4]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[4-0]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[4-1]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[4-2]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[4-3]",
"test/road_test.py::test_driving_faster_gives_a_larger_reward[4-4]",
"test/road_test.py::test_road_cannot_start_with_car_going_faster_than_speed_limit",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[0-car0]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[0-car1]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[1-car0]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[1-car1]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[2-car0]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[2-car1]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[3-car0]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[3-car1]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[4-car0]",
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[4-car1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-0-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-0-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-1-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-1-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-2-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-2-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-3-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-3-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-4-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-4-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-0-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-0-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-1-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-1-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-2-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-2-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-3-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-3-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-4-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-4-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-0-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-0-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-1-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-1-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-2-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-2-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-3-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-3-obst1]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-4-obst0]",
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-4-obst1]",
"test/road_test.py::test_speed_limit_equals_number_of_rows_plus_one"
] | [] | [] | [] | MIT License | 2,659 | 653 | [
"driving_gridworld/road.py"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.